/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apimachinery
import (
"crypto/sha1"
"fmt"
"k8s.io/apimachinery/pkg/util/validation"
)
// CalculateVirtualMachineInstanceID calculates a stable and unique identifier for a VMI based on its name attribute.
// For VMI names longer than 63 characters, the name is truncated and hashed to ensure uniqueness.
func CalculateVirtualMachineInstanceID(vmiName string) string {
if len(vmiName) <= validation.DNS1035LabelMaxLength {
return vmiName
}
const (
hashLength = 8
vmiNamePrefixMaxLength = validation.DNS1035LabelMaxLength - hashLength - 1
)
truncatedVMIName := vmiName[:vmiNamePrefixMaxLength]
hasher := sha1.New()
hasher.Write([]byte(vmiName))
vmiNameHash := fmt.Sprintf("%x", hasher.Sum(nil))
return fmt.Sprintf("%s-%s", truncatedVMIName, vmiNameHash[:hashLength])
}
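// Usage sketch (names are illustrative): short names pass through unchanged,
// longer ones are compacted into a valid DNS-1035 label.
//
//	CalculateVirtualMachineInstanceID("my-vmi") // "my-vmi"
//	// A 70-character name yields "<first 54 characters>-<first 8 hex digits of
//	// the SHA-1 of the full name>", exactly 63 characters long.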
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package patch
import (
"encoding/json"
"fmt"
"strings"
)
type PatchOperation struct {
Op string `json:"op"`
Path string `json:"path"`
Value interface{} `json:"value"`
}
const (
PatchReplaceOp = "replace"
PatchTestOp = "test"
PatchAddOp = "add"
PatchRemoveOp = "remove"
)
func (p *PatchOperation) MarshalJSON() ([]byte, error) {
switch p.Op {
// The 'remove' operation is the only patch operation without a value
// and it needs to be serialized differently.
case PatchRemoveOp:
return json.Marshal(&struct {
Op string `json:"op"`
Path string `json:"path"`
}{
Op: p.Op,
Path: p.Path,
})
case PatchTestOp, PatchReplaceOp, PatchAddOp:
return json.Marshal(&struct {
Op string `json:"op"`
Path string `json:"path"`
Value interface{} `json:"value"`
}{
Op: p.Op,
Path: p.Path,
Value: p.Value,
})
default:
return nil, fmt.Errorf("operation %s not recognized", p.Op)
}
}
type PatchSet struct {
patches []PatchOperation
}
type PatchOption func(patches *PatchSet)
func New(opts ...PatchOption) *PatchSet {
p := &PatchSet{}
p.AddOption(opts...)
return p
}
func (p *PatchSet) GetPatches() []PatchOperation {
return p.patches
}
func (p *PatchSet) AddOption(opts ...PatchOption) {
for _, f := range opts {
f(p)
}
}
func (p *PatchSet) addOp(op, path string, value interface{}) {
p.patches = append(p.patches, PatchOperation{
Op: op,
Path: path,
Value: value,
})
}
func WithTest(path string, value interface{}) PatchOption {
return func(p *PatchSet) {
p.addOp(PatchTestOp, path, value)
}
}
func WithAdd(path string, value interface{}) PatchOption {
return func(p *PatchSet) {
p.addOp(PatchAddOp, path, value)
}
}
func WithReplace(path string, value interface{}) PatchOption {
return func(p *PatchSet) {
p.addOp(PatchReplaceOp, path, value)
}
}
func WithRemove(path string) PatchOption {
return func(p *PatchSet) {
p.addOp(PatchRemoveOp, path, nil)
}
}
func (p *PatchSet) GeneratePayload() ([]byte, error) {
return GeneratePatchPayload(p.patches...)
}
func (p *PatchSet) IsEmpty() bool {
return len(p.patches) < 1
}
func (p *PatchSet) ToSlice() ([]string, error) {
var result []string
for _, operation := range p.patches {
patch, err := operation.MarshalJSON()
if err != nil {
return nil, err
}
result = append(result, string(patch))
}
return result, nil
}
func GeneratePatchPayload(patches ...PatchOperation) ([]byte, error) {
if len(patches) == 0 {
return nil, fmt.Errorf("list of patches is empty")
}
payloadBytes, err := json.Marshal(patches)
if err != nil {
return nil, err
}
return payloadBytes, nil
}
func GenerateTestReplacePatch(path string, oldValue, newValue interface{}) ([]byte, error) {
return GeneratePatchPayload(
PatchOperation{
Op: PatchTestOp,
Path: path,
Value: oldValue,
},
PatchOperation{
Op: PatchReplaceOp,
Path: path,
Value: newValue,
},
)
}
func UnmarshalPatch(patch []byte) ([]PatchOperation, error) {
var p []PatchOperation
err := json.Unmarshal(patch, &p)
return p, err
}
func EscapeJSONPointer(ptr string) string {
s := strings.ReplaceAll(ptr, "~", "~0")
return strings.ReplaceAll(s, "/", "~1")
}
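// Usage sketch (the label path is hypothetical): build a patch set and marshal
// it into an RFC 6902 JSON Patch payload. Keys containing '/' or '~' must be
// escaped with EscapeJSONPointer first.
//
//	ps := New(
//		WithTest("/metadata/labels/app", "old"),
//		WithReplace("/metadata/labels/app", "new"),
//		WithRemove("/metadata/labels/"+EscapeJSONPointer("kubevirt.io/domain")),
//	)
//	payload, err := ps.GeneratePayload()
//	// payload: [{"op":"test","path":"/metadata/labels/app","value":"old"},
//	//           {"op":"replace","path":"/metadata/labels/app","value":"new"},
//	//           {"op":"remove","path":"/metadata/labels/kubevirt.io~1domain"}]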
package bootstrap
import (
"crypto/tls"
"fmt"
"os"
"path/filepath"
"sync"
"time"
"github.com/fsnotify/fsnotify"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/certificate"
"kubevirt.io/kubevirt/pkg/certificates/triple"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/certificates/triple/cert"
)
const (
CertBytesValue = "tls.crt"
KeyBytesValue = "tls.key"
)
type FileCertificateManager struct {
stopCh chan struct{}
certAccessLock sync.Mutex
stopped bool
cert *tls.Certificate
certBytesPath string
keyBytesPath string
errorRetryInterval time.Duration
}
// NewFallbackCertificateManager returns a certificate manager which can fall back to a self-signed certificate
// if there is currently no KubeVirt installation present on the cluster. This helps deal with situations where e.g.
// readiness probes try to access an API which can't yet provide a fully managed certificate.
// virt-operator is the main recipient of this manager, since the certificate management infrastructure is not always
// already present when virt-operator gets created.
func NewFallbackCertificateManager(certManager certificate.Manager) *FallbackCertificateManager {
caKeyPair, _ := triple.NewCA("kubevirt.io", time.Hour*24*7)
keyPair, _ := triple.NewServerKeyPair(
caKeyPair,
"fallback.certificate.kubevirt.io",
"fallback",
"fallback",
"cluster.local",
nil,
nil,
		time.Hour*24*365*10, // ten years
)
crt, err := tls.X509KeyPair(cert.EncodeCertPEM(keyPair.Cert), cert.EncodePrivateKeyPEM(keyPair.Key))
if err != nil {
log.DefaultLogger().Reason(err).Critical("Failed to generate a fallback certificate.")
}
crt.Leaf = keyPair.Cert
return &FallbackCertificateManager{
certManager: certManager,
fallbackCertificate: &crt,
}
}
type FallbackCertificateManager struct {
certManager certificate.Manager
fallbackCertificate *tls.Certificate
}
func (f *FallbackCertificateManager) Start() {
f.certManager.Start()
}
func (f *FallbackCertificateManager) Stop() {
f.certManager.Stop()
}
func (f *FallbackCertificateManager) Current() *tls.Certificate {
crt := f.certManager.Current()
if crt != nil {
return crt
}
return f.fallbackCertificate
}
func (f *FallbackCertificateManager) ServerHealthy() bool {
return f.certManager.ServerHealthy()
}
func NewFileCertificateManager(certBytesPath string, keyBytesPath string) *FileCertificateManager {
return &FileCertificateManager{
certBytesPath: certBytesPath,
keyBytesPath: keyBytesPath,
stopCh: make(chan struct{}, 1),
errorRetryInterval: 1 * time.Minute,
}
}
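// Usage sketch (paths are illustrative): Start blocks, so it runs in its own
// goroutine; Current serves whatever certificate was loaded last.
//
//	certManager := NewFileCertificateManager("/etc/certs/tls.crt", "/etc/certs/tls.key")
//	go certManager.Start()
//	defer certManager.Stop()
//	tlsConfig := &tls.Config{
//		GetCertificate: func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) {
//			return certManager.Current(), nil
//		},
//	}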
func (f *FileCertificateManager) Start() {
objectUpdated := make(chan struct{}, 1)
watcher, err := fsnotify.NewWatcher()
if err != nil {
log.DefaultLogger().Reason(err).Critical("Failed to create an inotify watcher")
}
defer watcher.Close()
certDir := filepath.Dir(f.certBytesPath)
err = watcher.Add(certDir)
if err != nil {
log.DefaultLogger().Reason(err).Criticalf("Failed to establish a watch on %s", f.certBytesPath)
}
keyDir := filepath.Dir(f.keyBytesPath)
if keyDir != certDir {
err = watcher.Add(keyDir)
if err != nil {
log.DefaultLogger().Reason(err).Criticalf("Failed to establish a watch on %s", f.keyBytesPath)
}
}
go func() {
for {
select {
case _, ok := <-watcher.Events:
if !ok {
return
}
select {
case objectUpdated <- struct{}{}:
default:
log.DefaultLogger().V(5).Infof("Dropping redundant wakeup for cert reload")
}
case err, ok := <-watcher.Errors:
if !ok {
return
}
log.DefaultLogger().Reason(err).Errorf("An error occurred when watching certificates files %s and %s", f.certBytesPath, f.keyBytesPath)
}
}
}()
// ensure we load the certificates on startup
objectUpdated <- struct{}{}
sync:
for {
select {
case <-objectUpdated:
if err := f.rotateCerts(); err != nil {
go func() {
time.Sleep(f.errorRetryInterval)
select {
case objectUpdated <- struct{}{}:
default:
log.DefaultLogger().V(5).Infof("Dropping redundant wakeup for cert reload")
}
}()
}
case <-f.stopCh:
break sync
}
}
}
func (f *FileCertificateManager) Stop() {
f.certAccessLock.Lock()
defer f.certAccessLock.Unlock()
if f.stopped {
return
}
close(f.stopCh)
f.stopped = true
}
func (f *FileCertificateManager) ServerHealthy() bool {
panic("implement me")
}
func (f *FileCertificateManager) Current() *tls.Certificate {
	f.certAccessLock.Lock()
	defer f.certAccessLock.Unlock()
	return f.cert
}
func (f *FileCertificateManager) rotateCerts() error {
crt, err := f.loadCertificates()
if err != nil {
log.DefaultLogger().Reason(err).Errorf("failed to load the certificate %s and %s", f.certBytesPath, f.keyBytesPath)
return err
}
f.certAccessLock.Lock()
defer f.certAccessLock.Unlock()
	// update under the lock, so concurrent Current() calls never observe a partially rotated certificate
f.cert = crt
log.DefaultLogger().Infof("certificate with common name '%s' retrieved.", crt.Leaf.Subject.CommonName)
return nil
}
func (f *FileCertificateManager) loadCertificates() (serverCrt *tls.Certificate, err error) {
// #nosec No risk for path injection. Used for specific cert file for key rotation
certBytes, err := os.ReadFile(f.certBytesPath)
if err != nil {
return nil, err
}
// #nosec No risk for path injection. Used for specific cert file for key rotation
keyBytes, err := os.ReadFile(f.keyBytesPath)
if err != nil {
return nil, err
}
crt, err := tls.X509KeyPair(certBytes, keyBytes)
if err != nil {
return nil, fmt.Errorf("failed to load certificate: %v\n", err)
}
leaf, err := cert.ParseCertsPEM(certBytes)
if err != nil {
return nil, fmt.Errorf("failed to load leaf certificate: %v\n", err)
}
crt.Leaf = leaf[0]
return &crt, nil
}
type SecretCertificateManager struct {
store cache.Store
secretKey string
tlsCrt string
tlsKey string
crtLock *sync.Mutex
revision string
crt *tls.Certificate
}
func (s *SecretCertificateManager) Start() {
}
func (s *SecretCertificateManager) Stop() {
}
func (s *SecretCertificateManager) Current() *tls.Certificate {
s.crtLock.Lock()
defer s.crtLock.Unlock()
rawSecret, exists, err := s.store.GetByKey(s.secretKey)
if err != nil {
log.DefaultLogger().Reason(err).Errorf("Secret %s can't be retrieved from the cache", s.secretKey)
return s.crt
} else if !exists {
return s.crt
}
secret := rawSecret.(*v1.Secret)
if secret.ObjectMeta.ResourceVersion == s.revision {
return s.crt
}
crt, err := tls.X509KeyPair(secret.Data[s.tlsCrt], secret.Data[s.tlsKey])
if err != nil {
log.DefaultLogger().Reason(err).Errorf("failed to load certificate from secret %s", s.secretKey)
return s.crt
}
leaf, err := cert.ParseCertsPEM(secret.Data[s.tlsCrt])
if err != nil {
log.DefaultLogger().Reason(err).Errorf("failed to load leaf certificate from secret %s", s.secretKey)
return s.crt
}
crt.Leaf = leaf[0]
s.revision = secret.ResourceVersion
s.crt = &crt
return s.crt
}
func (s *SecretCertificateManager) ServerHealthy() bool {
panic("implement me")
}
// NewSecretCertificateManager takes a secret store and the name and the namespace of a secret. If there is a newer
// version of the secret in the cache, the next Current() call will immediately yield it. It takes resource versions
// into account to be efficient.
func NewSecretCertificateManager(name string, namespace string, store cache.Store) *SecretCertificateManager {
return &SecretCertificateManager{
store: store,
secretKey: fmt.Sprintf("%s/%s", namespace, name),
tlsCrt: CertBytesValue,
tlsKey: KeyBytesValue,
crtLock: &sync.Mutex{},
}
}
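// Usage sketch (assumes a secret informer whose store keys objects by
// "namespace/name", as cache.MetaNamespaceKeyFunc does):
//
//	mgr := NewSecretCertificateManager("kubevirt-tls", "kubevirt", secretInformer.GetStore())
//	crt := mgr.Current() // re-parsed only when the secret's resourceVersion changes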
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cert
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
cryptorand "crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"errors"
"math"
"math/big"
mathrand "math/rand"
"net"
"time"
)
const (
rsaKeySize = 2048
)
// Config contains the basic fields required for creating a certificate
type Config struct {
CommonName string
Organization []string
AltNames AltNames
Usages []x509.ExtKeyUsage
NotBefore, NotAfter *time.Time
}
// AltNames contains the domain names and IP addresses that will be added
// to the API Server's x509 certificate SubAltNames field. The values will
// be passed directly to the x509.Certificate object.
type AltNames struct {
DNSNames []string
IPs []net.IP
}
// NewRSAPrivateKey creates an RSA private key
func NewRSAPrivateKey() (*rsa.PrivateKey, error) {
return rsa.GenerateKey(cryptorand.Reader, rsaKeySize)
}
// NewECDSAPrivateKey creates an ECDSA private key
func NewECDSAPrivateKey() (*ecdsa.PrivateKey, error) {
return ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
}
// NewSelfSignedCACert creates a CA certificate
func NewSelfSignedCACert(cfg Config, key crypto.Signer, duration time.Duration) (*x509.Certificate, error) {
now := time.Now()
tmpl := x509.Certificate{
SerialNumber: new(big.Int).SetInt64(randomSerialNumber()),
Subject: pkix.Name{
CommonName: cfg.CommonName,
Organization: cfg.Organization,
},
NotBefore: now.UTC(),
NotAfter: now.Add(duration).UTC(),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
BasicConstraintsValid: true,
IsCA: true,
DNSNames: cfg.AltNames.DNSNames,
}
if cfg.NotBefore != nil {
tmpl.NotBefore = *cfg.NotBefore
}
if cfg.NotAfter != nil {
tmpl.NotAfter = *cfg.NotAfter
}
certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key)
if err != nil {
return nil, err
}
return x509.ParseCertificate(certDERBytes)
}
// NewSignedCert creates a signed certificate using the given CA certificate and key
func NewSignedCert(cfg Config, key crypto.Signer, caCert *x509.Certificate, caKey crypto.Signer, duration time.Duration) (*x509.Certificate, error) {
serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64))
if err != nil {
return nil, err
}
if len(cfg.CommonName) == 0 {
return nil, errors.New("must specify a CommonName")
}
if len(cfg.Usages) == 0 {
return nil, errors.New("must specify at least one ExtKeyUsage")
}
certTmpl := x509.Certificate{
Subject: pkix.Name{
CommonName: cfg.CommonName,
Organization: cfg.Organization,
},
DNSNames: cfg.AltNames.DNSNames,
IPAddresses: cfg.AltNames.IPs,
SerialNumber: serial,
NotBefore: caCert.NotBefore,
NotAfter: time.Now().Add(duration).UTC(),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: cfg.Usages,
}
if cfg.NotBefore != nil {
certTmpl.NotBefore = *cfg.NotBefore
}
if cfg.NotAfter != nil {
certTmpl.NotAfter = *cfg.NotAfter
}
certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &certTmpl, caCert, key.Public(), caKey)
if err != nil {
return nil, err
}
return x509.ParseCertificate(certDERBytes)
}
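// Usage sketch (names and durations are illustrative): create a self-signed CA
// and sign a server certificate with it.
//
//	caKey, _ := NewECDSAPrivateKey()
//	caCert, _ := NewSelfSignedCACert(Config{CommonName: "example-ca"}, caKey, 10*365*24*time.Hour)
//	srvKey, _ := NewECDSAPrivateKey()
//	srvCert, _ := NewSignedCert(Config{
//		CommonName: "server.example.svc",
//		Usages:     []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
//	}, srvKey, caCert, caKey, 24*time.Hour)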
// randomSerialNumber returns a random int64 serial number based on
// time.Now. It is defined separately from the generator interface so
// that the caller doesn't have to worry about an input template or
// error - these are unnecessary when creating a random serial.
func randomSerialNumber() int64 {
r := mathrand.New(mathrand.NewSource(time.Now().UTC().UnixNano()))
return r.Int63()
}
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cert
import (
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
)
const (
// ECPrivateKeyBlockType is a possible value for pem.Block.Type.
ECPrivateKeyBlockType = "EC PRIVATE KEY"
// RSAPrivateKeyBlockType is a possible value for pem.Block.Type.
RSAPrivateKeyBlockType = "RSA PRIVATE KEY"
// PrivateKeyBlockType is a possible value for pem.Block.Type.
PrivateKeyBlockType = "PRIVATE KEY"
// PublicKeyBlockType is a possible value for pem.Block.Type.
PublicKeyBlockType = "PUBLIC KEY"
// CertificateBlockType is a possible value for pem.Block.Type.
CertificateBlockType = "CERTIFICATE"
// CertificateRequestBlockType is a possible value for pem.Block.Type.
CertificateRequestBlockType = "CERTIFICATE REQUEST"
)
// EncodePublicKeyPEM returns PEM-encoded public data
func EncodePublicKeyPEM(key *rsa.PublicKey) ([]byte, error) {
der, err := x509.MarshalPKIXPublicKey(key)
if err != nil {
return []byte{}, err
}
block := pem.Block{
Type: PublicKeyBlockType,
Bytes: der,
}
return pem.EncodeToMemory(&block), nil
}
// EncodePrivateKeyPEM returns PEM-encoded private key data
func EncodePrivateKeyPEM(key crypto.PrivateKey) []byte {
switch t := key.(type) {
case *ecdsa.PrivateKey:
derBytes, err := x509.MarshalECPrivateKey(t)
if err != nil {
return nil
}
block := &pem.Block{
Type: ECPrivateKeyBlockType,
Bytes: derBytes,
}
return pem.EncodeToMemory(block)
case *rsa.PrivateKey:
block := &pem.Block{
Type: RSAPrivateKeyBlockType,
Bytes: x509.MarshalPKCS1PrivateKey(t),
}
return pem.EncodeToMemory(block)
default:
return nil
}
}
// EncodeCertPEM returns PEM-encoded certificate data
func EncodeCertPEM(cert *x509.Certificate) []byte {
block := pem.Block{
Type: CertificateBlockType,
Bytes: cert.Raw,
}
return pem.EncodeToMemory(&block)
}
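// Round-trip sketch: the encoders above pair with the parsers below.
//
//	keyPEM := EncodePrivateKeyPEM(key)    // "EC PRIVATE KEY" or "RSA PRIVATE KEY" block
//	certPEM := EncodeCertPEM(certificate) // "CERTIFICATE" block
//	parsedKey, _ := ParsePrivateKeyPEM(keyPEM)
//	parsedCerts, _ := ParseCertsPEM(certPEM)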
// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data.
// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY"
func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) {
var privateKeyPemBlock *pem.Block
for {
privateKeyPemBlock, keyData = pem.Decode(keyData)
if privateKeyPemBlock == nil {
break
}
switch privateKeyPemBlock.Type {
case ECPrivateKeyBlockType:
// ECDSA Private Key in ASN.1 format
if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil {
return key, nil
}
case RSAPrivateKeyBlockType:
// RSA Private Key in PKCS#1 format
if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil {
return key, nil
}
case PrivateKeyBlockType:
// RSA or ECDSA Private Key in unencrypted PKCS#8 format
if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil {
return key, nil
}
}
// tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks
// originally, only the first PEM block was parsed and expected to be a key block
}
// we read all the PEM blocks and didn't recognize one
return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key")
}
// ParsePublicKeysPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array.
// Reads public keys from both public and private key files.
func ParsePublicKeysPEM(keyData []byte) ([]interface{}, error) {
var block *pem.Block
keys := []interface{}{}
for {
// read the next block
block, keyData = pem.Decode(keyData)
if block == nil {
break
}
// test block against parsing functions
if privateKey, err := parseRSAPrivateKey(block.Bytes); err == nil {
keys = append(keys, &privateKey.PublicKey)
continue
}
if publicKey, err := parseRSAPublicKey(block.Bytes); err == nil {
keys = append(keys, publicKey)
continue
}
if privateKey, err := parseECPrivateKey(block.Bytes); err == nil {
keys = append(keys, &privateKey.PublicKey)
continue
}
if publicKey, err := parseECPublicKey(block.Bytes); err == nil {
keys = append(keys, publicKey)
continue
}
// tolerate non-key PEM blocks for backwards compatibility
// originally, only the first PEM block was parsed and expected to be a key block
}
if len(keys) == 0 {
return nil, fmt.Errorf("data does not contain any valid RSA or ECDSA public keys")
}
return keys, nil
}
// ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array
// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates
func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) {
ok := false
certs := []*x509.Certificate{}
for len(pemCerts) > 0 {
var block *pem.Block
block, pemCerts = pem.Decode(pemCerts)
if block == nil {
break
}
// Only use PEM "CERTIFICATE" blocks without extra headers
if block.Type != CertificateBlockType || len(block.Headers) != 0 {
continue
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return certs, err
}
certs = append(certs, cert)
ok = true
}
if !ok {
return certs, errors.New("data does not contain any valid RSA or ECDSA certificates")
}
return certs, nil
}
// parseRSAPublicKey parses a single RSA public key from the provided data
func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) {
var err error
// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil {
if cert, err := x509.ParseCertificate(data); err == nil {
parsedKey = cert.PublicKey
} else {
return nil, err
}
}
// Test if parsed key is an RSA Public Key
var pubKey *rsa.PublicKey
var ok bool
if pubKey, ok = parsedKey.(*rsa.PublicKey); !ok {
return nil, fmt.Errorf("data doesn't contain valid RSA Public Key")
}
return pubKey, nil
}
// parseRSAPrivateKey parses a single RSA private key from the provided data
func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) {
var err error
// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParsePKCS1PrivateKey(data); err != nil {
if parsedKey, err = x509.ParsePKCS8PrivateKey(data); err != nil {
return nil, err
}
}
// Test if parsed key is an RSA Private Key
var privKey *rsa.PrivateKey
var ok bool
if privKey, ok = parsedKey.(*rsa.PrivateKey); !ok {
return nil, fmt.Errorf("data doesn't contain valid RSA Private Key")
}
return privKey, nil
}
// parseECPublicKey parses a single ECDSA public key from the provided data
func parseECPublicKey(data []byte) (*ecdsa.PublicKey, error) {
var err error
// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil {
if cert, err := x509.ParseCertificate(data); err == nil {
parsedKey = cert.PublicKey
} else {
return nil, err
}
}
// Test if parsed key is an ECDSA Public Key
var pubKey *ecdsa.PublicKey
var ok bool
if pubKey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
return nil, fmt.Errorf("data doesn't contain valid ECDSA Public Key")
}
return pubKey, nil
}
// parseECPrivateKey parses a single ECDSA private key from the provided data
func parseECPrivateKey(data []byte) (*ecdsa.PrivateKey, error) {
var err error
// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParseECPrivateKey(data); err != nil {
return nil, err
}
// Test if parsed key is an ECDSA Private Key
var privKey *ecdsa.PrivateKey
var ok bool
if privKey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
return nil, fmt.Errorf("data doesn't contain valid ECDSA Private Key")
}
return privKey, nil
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package triple generates key-certificate pairs for the
// triple (CA, Server, Client).
package triple
import (
"crypto/ecdsa"
"crypto/x509"
"fmt"
"net"
"time"
certutil "kubevirt.io/kubevirt/pkg/certificates/triple/cert"
)
type KeyPair struct {
Key *ecdsa.PrivateKey
Cert *x509.Certificate
}
func NewCA(name string, duration time.Duration) (*KeyPair, error) {
key, err := certutil.NewECDSAPrivateKey()
if err != nil {
return nil, fmt.Errorf("unable to create a private key for a new CA: %v", err)
}
signerName := fmt.Sprintf("%s@%d", name, time.Now().Unix())
config := certutil.Config{
CommonName: signerName,
}
cert, err := certutil.NewSelfSignedCACert(config, key, duration)
if err != nil {
return nil, fmt.Errorf("unable to create a self-signed certificate for a new CA: %v", err)
}
return &KeyPair{
Key: key,
Cert: cert,
}, nil
}
func NewServerKeyPair(ca *KeyPair, commonName, svcName, svcNamespace, dnsDomain string, ips, hostnames []string, duration time.Duration) (*KeyPair, error) {
key, err := certutil.NewECDSAPrivateKey()
if err != nil {
return nil, fmt.Errorf("unable to create a server private key: %v", err)
}
namespacedName := fmt.Sprintf("%s.%s", svcName, svcNamespace)
internalAPIServerFQDN := []string{
svcName,
namespacedName,
fmt.Sprintf("%s.svc", namespacedName),
fmt.Sprintf("%s.svc.%s", namespacedName, dnsDomain),
}
altNames := certutil.AltNames{}
for _, ipStr := range ips {
ip := net.ParseIP(ipStr)
if ip != nil {
altNames.IPs = append(altNames.IPs, ip)
}
}
altNames.DNSNames = append(altNames.DNSNames, hostnames...)
altNames.DNSNames = append(altNames.DNSNames, internalAPIServerFQDN...)
config := certutil.Config{
CommonName: commonName,
AltNames: altNames,
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
}
cert, err := certutil.NewSignedCert(config, key, ca.Cert, ca.Key, duration)
if err != nil {
return nil, fmt.Errorf("unable to sign the server certificate: %v", err)
}
return &KeyPair{
Key: key,
Cert: cert,
}, nil
}
func NewClientKeyPair(ca *KeyPair, commonName string, organizations []string, duration time.Duration) (*KeyPair, error) {
key, err := certutil.NewECDSAPrivateKey()
if err != nil {
return nil, fmt.Errorf("unable to create a client private key: %v", err)
}
config := certutil.Config{
CommonName: commonName,
Organization: organizations,
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
}
cert, err := certutil.NewSignedCert(config, key, ca.Cert, ca.Key, duration)
if err != nil {
return nil, fmt.Errorf("unable to sign the client certificate: %v", err)
}
return &KeyPair{
Key: key,
Cert: cert,
}, nil
}
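// Usage sketch (service name, namespace and durations are illustrative):
//
//	ca, _ := NewCA("example-ca", 7*24*time.Hour)
//	server, _ := NewServerKeyPair(ca, "example.service.kubevirt.io",
//		"virt-api", "kubevirt", "cluster.local", nil, nil, 24*time.Hour)
//	client, _ := NewClientKeyPair(ca, "example-client", []string{"example-group"}, 24*time.Hour)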
/*
* This file is part of the kubevirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package cloudinit
import (
"encoding/base64"
"encoding/json"
"fmt"
"os"
"os/exec"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/google/uuid"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
"kubevirt.io/client-go/precond"
diskutils "kubevirt.io/kubevirt/pkg/ephemeral-disk-utils"
"kubevirt.io/kubevirt/pkg/util"
"kubevirt.io/kubevirt/pkg/util/net/dns"
)
const isoStagingFmt = "%s.staging"
type IsoCreationFunc func(isoOutFile, volumeID string, inDir string) error
var cloudInitLocalDir = "/var/run/libvirt/cloud-init-dir"
var cloudInitIsoFunc = defaultIsoFunc
// Locations of data source disk files
const (
noCloudFile = "noCloud.iso"
configDriveFile = "configdrive.iso"
)
type DataSourceType string
type DeviceMetadataType string
const (
DataSourceNoCloud DataSourceType = "noCloud"
DataSourceConfigDrive DataSourceType = "configDrive"
NICMetadataType DeviceMetadataType = "nic"
HostDevMetadataType DeviceMetadataType = "hostdev"
)
// CloudInitData is a data source independent struct that
// holds cloud-init user and network data
type CloudInitData struct {
DataSource DataSourceType
NoCloudMetaData *NoCloudMetadata
ConfigDriveMetaData *ConfigDriveMetadata
UserData string
NetworkData string
DevicesData *[]DeviceData
VolumeName string
}
type PublicSSHKey struct {
string
}
type NoCloudMetadata struct {
InstanceType string `json:"instance-type,omitempty"`
InstanceID string `json:"instance-id"`
LocalHostname string `json:"local-hostname,omitempty"`
PublicSSHKeys map[string]string `json:"public-keys,omitempty"`
}
type ConfigDriveMetadata struct {
InstanceType string `json:"instance_type,omitempty"`
InstanceID string `json:"instance_id"`
LocalHostname string `json:"local_hostname,omitempty"`
Hostname string `json:"hostname,omitempty"`
UUID string `json:"uuid,omitempty"`
Devices *[]DeviceData `json:"devices,omitempty"`
PublicSSHKeys map[string]string `json:"public_keys,omitempty"`
}
type DeviceData struct {
Type DeviceMetadataType `json:"type"`
Bus string `json:"bus"`
Address string `json:"address"`
MAC string `json:"mac,omitempty"`
Serial string `json:"serial,omitempty"`
NumaNode uint32 `json:"numaNode,omitempty"`
AlignedCPUs []uint32 `json:"alignedCPUs,omitempty"`
Tags []string `json:"tags"`
}
// IsValidCloudInitData checks if the given CloudInitData object is valid in the sense that GenerateLocalData can be called with it.
func IsValidCloudInitData(cloudInitData *CloudInitData) bool {
return cloudInitData != nil && cloudInitData.UserData != "" && (cloudInitData.NoCloudMetaData != nil || cloudInitData.ConfigDriveMetaData != nil)
}
func cloudInitUUIDFromVMI(vmi *v1.VirtualMachineInstance) string {
if vmi.Spec.Domain.Firmware == nil {
return uuid.NewString()
}
return string(vmi.Spec.Domain.Firmware.UUID)
}
// ReadCloudInitVolumeDataSource scans the given VMI for CloudInit volumes and
// reads their content into a CloudInitData struct. Does not resolve secret refs.
func ReadCloudInitVolumeDataSource(vmi *v1.VirtualMachineInstance, secretSourceDir string) (cloudInitData *CloudInitData, err error) {
precond.MustNotBeNil(vmi)
// ClusterInstancetypeAnnotation will take precedence over a namespaced Instancetype
// for setting instance_type in the metadata
instancetype := vmi.Annotations[v1.ClusterInstancetypeAnnotation]
if instancetype == "" {
instancetype = vmi.Annotations[v1.InstancetypeAnnotation]
}
hostname := dns.SanitizeHostname(vmi)
for _, volume := range vmi.Spec.Volumes {
if volume.CloudInitNoCloud != nil {
keys, err := resolveNoCloudSecrets(vmi, secretSourceDir)
if err != nil {
return nil, err
}
cloudInitData, err = readCloudInitNoCloudSource(volume.CloudInitNoCloud)
cloudInitData.NoCloudMetaData = readCloudInitNoCloudMetaData(hostname, cloudInitUUIDFromVMI(vmi), instancetype, keys)
cloudInitData.VolumeName = volume.Name
return cloudInitData, err
}
if volume.CloudInitConfigDrive != nil {
keys, err := resolveConfigDriveSecrets(vmi, secretSourceDir)
if err != nil {
return nil, err
}
uuid := cloudInitUUIDFromVMI(vmi)
cloudInitData, err = readCloudInitConfigDriveSource(volume.CloudInitConfigDrive)
cloudInitData.ConfigDriveMetaData = readCloudInitConfigDriveMetaData(vmi.Name, uuid, hostname, vmi.Namespace, keys, instancetype)
cloudInitData.VolumeName = volume.Name
return cloudInitData, err
}
}
return nil, nil
}
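// Usage sketch (the secret directory is illustrative): the function returns
// nil data and a nil error when the VMI defines no cloud-init volume, so
// callers must check both.
//
//	data, err := ReadCloudInitVolumeDataSource(vmi, "/var/run/kubevirt-private/secret")
//	if err == nil && data != nil {
//		// data.DataSource is DataSourceNoCloud or DataSourceConfigDrive
//	}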
func isNoCloudAccessCredential(accessCred v1.AccessCredential) bool {
return accessCred.SSHPublicKey != nil && accessCred.SSHPublicKey.PropagationMethod.NoCloud != nil
}
func isConfigDriveAccessCredential(accessCred v1.AccessCredential) bool {
return accessCred.SSHPublicKey != nil && accessCred.SSHPublicKey.PropagationMethod.ConfigDrive != nil
}
func resolveSSHPublicKeys(accessCredentials []v1.AccessCredential, secretSourceDir string, isAccessCredentialValidFunc func(v1.AccessCredential) bool) (map[string]string, error) {
keys := make(map[string]string)
count := 0
for _, accessCred := range accessCredentials {
if !isAccessCredentialValidFunc(accessCred) {
continue
}
secretName := ""
if accessCred.SSHPublicKey.Source.Secret != nil {
secretName = accessCred.SSHPublicKey.Source.Secret.SecretName
}
if secretName == "" {
continue
}
baseDir := filepath.Join(secretSourceDir, secretName+"-access-cred")
files, err := os.ReadDir(baseDir)
if err != nil {
return keys, err
}
for _, file := range files {
if file.IsDir() || strings.HasPrefix(file.Name(), "..") {
continue
}
keyData, err := readFileFromDir(baseDir, file.Name())
if err != nil {
return keys, fmt.Errorf("Unable to read public keys found at volume: %s/%s error: %v", baseDir, file.Name(), err)
}
if keyData == "" {
continue
}
keys[strconv.Itoa(count)] = keyData
count++
}
}
return keys, nil
}
// resolveNoCloudSecrets looks for CloudInitNoCloud volumes with UserDataSecretRef
// or NetworkDataSecretRef set. It reads the `userdata` (or `networkdata`) secret that corresponds
// to the given CloudInitNoCloud volume and sets the UserData (or NetworkData) field on that volume.
//
// Note: when using this function, make sure that your code can access the secret volumes.
func resolveNoCloudSecrets(vmi *v1.VirtualMachineInstance, secretSourceDir string) (map[string]string, error) {
keys, err := resolveSSHPublicKeys(vmi.Spec.AccessCredentials, secretSourceDir, isNoCloudAccessCredential)
if err != nil {
return keys, err
}
volume := findCloudInitNoCloudSecretVolume(vmi.Spec.Volumes)
if volume == nil {
return keys, nil
}
baseDir := filepath.Join(secretSourceDir, volume.Name)
var userDataError, networkDataError error
var userData, networkData string
if volume.CloudInitNoCloud.UserDataSecretRef != nil {
userData, userDataError = readFirstFoundFileFromDir(baseDir, []string{"userdata", "userData"})
}
if volume.CloudInitNoCloud.NetworkDataSecretRef != nil {
networkData, networkDataError = readFirstFoundFileFromDir(baseDir, []string{"networkdata", "networkData"})
}
if userDataError != nil && networkDataError != nil {
return keys, fmt.Errorf("no cloud-init data-source found at volume: %s", volume.Name)
}
if userData != "" {
volume.CloudInitNoCloud.UserData = userData
}
if networkData != "" {
volume.CloudInitNoCloud.NetworkData = networkData
}
return keys, nil
}
// resolveConfigDriveSecrets looks for a CloudInitConfigDriveSource volume source with
// UserDataSecretRef or NetworkDataSecretRef set and resolves the secret from the corresponding
// volume mount.
//
// Note: when using this function, make sure that your code can access the secret volumes.
func resolveConfigDriveSecrets(vmi *v1.VirtualMachineInstance, secretSourceDir string) (map[string]string, error) {
keys, err := resolveSSHPublicKeys(vmi.Spec.AccessCredentials, secretSourceDir, isConfigDriveAccessCredential)
if err != nil {
return keys, err
}
volume := findCloudInitConfigDriveSecretVolume(vmi.Spec.Volumes)
if volume == nil {
return keys, nil
}
baseDir := filepath.Join(secretSourceDir, volume.Name)
var userDataError, networkDataError error
var userData, networkData string
if volume.CloudInitConfigDrive.UserDataSecretRef != nil {
userData, userDataError = readFirstFoundFileFromDir(baseDir, []string{"userdata", "userData"})
}
if volume.CloudInitConfigDrive.NetworkDataSecretRef != nil {
networkData, networkDataError = readFirstFoundFileFromDir(baseDir, []string{"networkdata", "networkData"})
}
if userDataError != nil && networkDataError != nil {
return keys, fmt.Errorf("no cloud-init data-source found at volume: %s", volume.Name)
}
if userData != "" {
volume.CloudInitConfigDrive.UserData = userData
}
if networkData != "" {
volume.CloudInitConfigDrive.NetworkData = networkData
}
return keys, nil
}
// findCloudInitConfigDriveSecretVolume loops over a given list of volumes and returns a pointer
// to the first volume with a CloudInitConfigDrive source and a UserDataSecretRef or NetworkDataSecretRef field set.
func findCloudInitConfigDriveSecretVolume(volumes []v1.Volume) *v1.Volume {
for _, volume := range volumes {
if volume.CloudInitConfigDrive == nil {
continue
}
if volume.CloudInitConfigDrive.UserDataSecretRef != nil ||
volume.CloudInitConfigDrive.NetworkDataSecretRef != nil {
return &volume
}
}
return nil
}
func readFirstFoundFileFromDir(basedir string, files []string) (string, error) {
var err error
var data string
for _, file := range files {
data, err = readFileFromDir(basedir, file)
if err == nil {
break
}
}
return data, err
}
func readFileFromDir(basedir, file string) (string, error) {
filePath := filepath.Join(basedir, file)
// #nosec No risk for path injection: basedir & secretFile are static strings
data, err := os.ReadFile(filePath)
if err != nil {
log.Log.Reason(err).Errorf("could not read data from source: %s", filePath)
return "", err
}
return string(data), nil
}
// findCloudInitNoCloudSecretVolume loops over a given list of volumes and returns a pointer
// to the first CloudInitNoCloud volume with a UserDataSecretRef or NetworkDataSecretRef field set.
func findCloudInitNoCloudSecretVolume(volumes []v1.Volume) *v1.Volume {
for _, volume := range volumes {
if volume.CloudInitNoCloud == nil {
continue
}
if volume.CloudInitNoCloud.UserDataSecretRef != nil ||
volume.CloudInitNoCloud.NetworkDataSecretRef != nil {
return &volume
}
}
return nil
}
func readRawOrBase64Data(rawData, base64Data string) (string, error) {
if rawData != "" {
return rawData, nil
} else if base64Data != "" {
bytes, err := base64.StdEncoding.DecodeString(base64Data)
return string(bytes), err
}
return "", nil
}
// readCloudInitData reads user and network data, raw or base64-encoded,
// regardless of which data source they come from
func readCloudInitData(userData, userDataBase64, networkData, networkDataBase64 string) (string, string, error) {
readUserData, err := readRawOrBase64Data(userData, userDataBase64)
if err != nil {
return "", "", err
}
readNetworkData, err := readRawOrBase64Data(networkData, networkDataBase64)
if err != nil {
return "", "", err
}
if readUserData == "" && readNetworkData == "" {
return "", "", fmt.Errorf("userDataBase64, userData, networkDataBase64 or networkData is required for a cloud-init data source")
}
return readUserData, readNetworkData, nil
}
func readCloudInitNoCloudSource(source *v1.CloudInitNoCloudSource) (*CloudInitData, error) {
userData, networkData, err := readCloudInitData(source.UserData,
source.UserDataBase64, source.NetworkData, source.NetworkDataBase64)
if err != nil {
return &CloudInitData{}, err
}
return &CloudInitData{
DataSource: DataSourceNoCloud,
UserData: userData,
NetworkData: networkData,
}, nil
}
func readCloudInitConfigDriveSource(source *v1.CloudInitConfigDriveSource) (*CloudInitData, error) {
userData, networkData, err := readCloudInitData(source.UserData,
source.UserDataBase64, source.NetworkData, source.NetworkDataBase64)
if err != nil {
return &CloudInitData{}, err
}
return &CloudInitData{
DataSource: DataSourceConfigDrive,
UserData: userData,
NetworkData: networkData,
}, nil
}
func readCloudInitNoCloudMetaData(hostname, instanceId string, instanceType string, keys map[string]string) *NoCloudMetadata {
return &NoCloudMetadata{
InstanceType: instanceType,
InstanceID: instanceId,
LocalHostname: hostname,
PublicSSHKeys: keys,
}
}
func readCloudInitConfigDriveMetaData(name, uuid, hostname, namespace string, keys map[string]string, instanceType string) *ConfigDriveMetadata {
return &ConfigDriveMetadata{
InstanceType: instanceType,
UUID: uuid,
InstanceID: fmt.Sprintf("%s.%s", name, namespace),
Hostname: hostname,
PublicSSHKeys: keys,
}
}
func defaultIsoFunc(isoOutFile, volumeID string, inDir string) error {
	args := []string{
		"-output", isoOutFile,
		"-volid", volumeID,
		"-joliet",
		"-rock",
		"-partition_cyl_align", "on",
		inDir,
	}
isoBinary := "xorrisofs"
// #nosec No risk for attacker injection. Parameters are predefined strings
cmd := exec.Command(isoBinary, args...)
err := cmd.Start()
if err != nil {
log.Log.Reason(err).Errorf("%s cmd failed to start while generating iso file %s", isoBinary, isoOutFile)
return err
}
done := make(chan error)
go func() { done <- cmd.Wait() }()
timeout := time.After(10 * time.Second)
for {
select {
		case <-timeout:
			log.Log.Errorf("Timed out generating cloud-init iso at path %s", isoOutFile)
			// kill the process and loop again; the done case then returns the error from cmd.Wait()
			cmd.Process.Kill()
case err := <-done:
if err != nil {
log.Log.Reason(err).Errorf("%s returned non-zero exit code while generating iso file %s with args '%s'", isoBinary, isoOutFile, strings.Join(cmd.Args, " "))
return err
}
return nil
}
}
}
// The unit test suite uses this function
func SetIsoCreationFunction(isoFunc IsoCreationFunc) {
cloudInitIsoFunc = isoFunc
}
func SetLocalDirectory(dir string) error {
err := util.MkdirAllWithNosec(dir)
if err != nil {
return fmt.Errorf("unable to initialize cloudInit local cache directory (%s). %v", dir, err)
}
exists, err := diskutils.FileExists(dir)
if err != nil {
return fmt.Errorf("CloudInit local cache directory (%s) does not exist or is inaccessible. %v", dir, err)
	} else if !exists {
return fmt.Errorf("CloudInit local cache directory (%s) does not exist or is inaccessible", dir)
}
SetLocalDirectoryOnly(dir)
return nil
}
// XXX refactor this whole package
// This is just a cheap workaround to make e2e tests pass
func SetLocalDirectoryOnly(dir string) {
cloudInitLocalDir = dir
}
func getDomainBasePath(domain string, namespace string) string {
return fmt.Sprintf("%s/%s/%s", cloudInitLocalDir, namespace, domain)
}
func GetIsoFilePath(source DataSourceType, domain, namespace string) string {
switch source {
case DataSourceNoCloud:
return fmt.Sprintf("%s/%s", getDomainBasePath(domain, namespace), noCloudFile)
case DataSourceConfigDrive:
return fmt.Sprintf("%s/%s", getDomainBasePath(domain, namespace), configDriveFile)
}
return fmt.Sprintf("%s/%s", getDomainBasePath(domain, namespace), noCloudFile)
}
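// Path sketch with the default local directory (VMI name and namespace are
// illustrative):
//
//	GetIsoFilePath(DataSourceNoCloud, "myvmi", "default")
//	// => "/var/run/libvirt/cloud-init-dir/default/myvmi/noCloud.iso"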
func PrepareLocalPath(vmiName string, namespace string) error {
return util.MkdirAllWithNosec(getDomainBasePath(vmiName, namespace))
}
func GenerateEmptyIso(vmiName string, namespace string, data *CloudInitData, size int64) error {
precond.MustNotBeEmpty(vmiName)
precond.MustNotBeNil(data)
var err error
var isoStaging, iso string
switch data.DataSource {
case DataSourceNoCloud, DataSourceConfigDrive:
iso = GetIsoFilePath(data.DataSource, vmiName, namespace)
default:
return fmt.Errorf("invalid cloud-init data source: '%v'", data.DataSource)
}
isoStaging = fmt.Sprintf(isoStagingFmt, iso)
err = diskutils.RemoveFilesIfExist(isoStaging)
if err != nil {
return err
}
err = util.MkdirAllWithNosec(path.Dir(isoStaging))
if err != nil {
log.Log.Reason(err).Errorf("unable to create cloud-init base path %s", path.Dir(isoStaging))
return err
}
f, err := os.Create(isoStaging)
if err != nil {
return fmt.Errorf("failed to create empty iso: '%s'", isoStaging)
}
err = util.WriteBytes(f, 0, size)
if err != nil {
return err
}
util.CloseIOAndCheckErr(f, &err)
if err != nil {
return err
}
if err := diskutils.DefaultOwnershipManager.UnsafeSetFileOwnership(isoStaging); err != nil {
return err
}
err = os.Rename(isoStaging, iso)
if err != nil {
log.Log.Reason(err).Errorf("Cloud-init failed to rename file %s to %s", isoStaging, iso)
return err
}
log.Log.V(2).Infof("generated empty iso file %s", iso)
return nil
}
func GenerateLocalData(vmi *v1.VirtualMachineInstance, instanceType string, data *CloudInitData) error {
precond.MustNotBeEmpty(vmi.Name)
precond.MustNotBeNil(data)
var metaData []byte
var err error
domainBasePath := getDomainBasePath(vmi.Name, vmi.Namespace)
dataBasePath := fmt.Sprintf("%s/data", domainBasePath)
var dataPath, metaFile, userFile, networkFile, iso, isoStaging string
switch data.DataSource {
case DataSourceNoCloud:
dataPath = dataBasePath
metaFile = fmt.Sprintf("%s/%s", dataPath, "meta-data")
userFile = fmt.Sprintf("%s/%s", dataPath, "user-data")
networkFile = fmt.Sprintf("%s/%s", dataPath, "network-config")
iso = GetIsoFilePath(DataSourceNoCloud, vmi.Name, vmi.Namespace)
isoStaging = fmt.Sprintf(isoStagingFmt, iso)
if data.NoCloudMetaData == nil {
log.Log.V(2).Infof("No metadata found in cloud-init data. Create minimal metadata with instance-id.")
data.NoCloudMetaData = &NoCloudMetadata{
InstanceID: cloudInitUUIDFromVMI(vmi),
}
data.NoCloudMetaData.InstanceType = instanceType
}
metaData, err = json.Marshal(data.NoCloudMetaData)
if err != nil {
return err
}
case DataSourceConfigDrive:
dataPath = fmt.Sprintf("%s/openstack/latest", dataBasePath)
metaFile = fmt.Sprintf("%s/%s", dataPath, "meta_data.json")
userFile = fmt.Sprintf("%s/%s", dataPath, "user_data")
networkFile = fmt.Sprintf("%s/%s", dataPath, "network_data.json")
iso = GetIsoFilePath(DataSourceConfigDrive, vmi.Name, vmi.Namespace)
isoStaging = fmt.Sprintf(isoStagingFmt, iso)
if data.ConfigDriveMetaData == nil {
log.Log.V(2).Infof("No metadata found in cloud-init data. Create minimal metadata with instance-id.")
instanceId := fmt.Sprintf("%s.%s", vmi.Name, vmi.Namespace)
data.ConfigDriveMetaData = &ConfigDriveMetadata{
InstanceID: instanceId,
UUID: cloudInitUUIDFromVMI(vmi),
}
data.ConfigDriveMetaData.InstanceType = instanceType
}
data.ConfigDriveMetaData.Devices = data.DevicesData
metaData, err = json.Marshal(data.ConfigDriveMetaData)
if err != nil {
return err
}
default:
return fmt.Errorf("Invalid cloud-init data source: '%v'", data.DataSource)
}
err = util.MkdirAllWithNosec(dataPath)
if err != nil {
log.Log.Reason(err).Errorf("unable to create cloud-init base path %s", domainBasePath)
return err
}
if data.UserData == "" && data.NetworkData == "" {
return fmt.Errorf("UserData or NetworkData is required for cloud-init data source")
}
userData := []byte(data.UserData)
var networkData []byte
if data.NetworkData != "" {
networkData = []byte(data.NetworkData)
}
err = diskutils.RemoveFilesIfExist(userFile, metaFile, networkFile, isoStaging)
if err != nil {
return err
}
err = os.WriteFile(userFile, userData, 0600)
if err != nil {
return err
}
defer os.Remove(userFile)
err = os.WriteFile(metaFile, metaData, 0600)
if err != nil {
return err
}
defer os.Remove(metaFile)
if len(networkData) > 0 {
err = os.WriteFile(networkFile, networkData, 0600)
if err != nil {
return err
}
defer os.Remove(networkFile)
}
switch data.DataSource {
case DataSourceNoCloud:
err = cloudInitIsoFunc(isoStaging, "cidata", dataBasePath)
case DataSourceConfigDrive:
err = cloudInitIsoFunc(isoStaging, "config-2", dataBasePath)
}
if err != nil {
return err
}
if err := diskutils.DefaultOwnershipManager.UnsafeSetFileOwnership(isoStaging); err != nil {
return err
}
err = os.Rename(isoStaging, iso)
if err != nil {
log.Log.Reason(err).Errorf("Cloud-init failed to rename file %s to %s", isoStaging, iso)
return err
}
log.Log.V(2).Infof("generated nocloud iso file %s", iso)
return nil
}
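// Resulting on-disk layout for DataSourceNoCloud, relative to the domain base
// path (ConfigDrive instead uses data/openstack/latest and snake_case file names):
//
//	data/meta-data       JSON-marshalled NoCloudMetadata
//	data/user-data       raw cloud-init user data
//	data/network-config  network data, written only when present
//	noCloud.iso          built from data/ with volume ID "cidata"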
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package config
import (
"path/filepath"
v1 "kubevirt.io/api/core/v1"
)
// GetConfigMapSourcePath returns a path to ConfigMap mounted on a pod
func GetConfigMapSourcePath(volumeName string) string {
return filepath.Join(ConfigMapSourceDir, volumeName)
}
// GetConfigMapDiskPath returns a path to ConfigMap iso image created based on a volume name
func GetConfigMapDiskPath(volumeName string) string {
return filepath.Join(ConfigMapDisksDir, volumeName+".iso")
}
type configMapVolumeInfo struct{}
func (i configMapVolumeInfo) isValidType(v *v1.Volume) bool {
	return v.ConfigMap != nil
}
func (i configMapVolumeInfo) getSourcePath(v *v1.Volume) string {
	return GetConfigMapSourcePath(v.Name)
}
func (i configMapVolumeInfo) getIsoPath(v *v1.Volume) string {
	return GetConfigMapDiskPath(v.Name)
}
func (i configMapVolumeInfo) getLabel(v *v1.Volume) string {
	return v.ConfigMap.VolumeLabel
}
// CreateConfigMapDisks creates ConfigMap iso disks which are attached to vmis
func CreateConfigMapDisks(vmi *v1.VirtualMachineInstance, emptyIso bool) error {
	return createIsoDisksForConfigVolumes(vmi, emptyIso, configMapVolumeInfo{})
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package config
import (
"fmt"
"os"
"os/exec"
"path/filepath"
ephemeraldiskutils "kubevirt.io/kubevirt/pkg/ephemeral-disk-utils"
"kubevirt.io/kubevirt/pkg/util"
v1 "kubevirt.io/api/core/v1"
)
type (
// Type represents allowed config types like ConfigMap or Secret
Type string
isoCreationFunc func(output string, volID string, files []string) error
)
const (
	// ConfigMap represents a configmap type,
// https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/
ConfigMap Type = "configmap"
// Secret represents a secret type,
// https://kubernetes.io/docs/concepts/configuration/secret/
Secret Type = "secret"
// DownwardAPI represents a DownwardAPI type,
// https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/
DownwardAPI Type = "downwardapi"
	// ServiceAccount represents a serviceaccount type,
// https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
ServiceAccount Type = "serviceaccount"
mountBaseDir = "/var/run/kubevirt-private"
)
var (
// ConfigMapSourceDir represents a location where ConfigMap is attached to the pod
ConfigMapSourceDir = filepath.Join(mountBaseDir, "config-map")
// SysprepSourceDir represents a location where a Sysprep is attached to the pod
SysprepSourceDir = filepath.Join(mountBaseDir, "sysprep")
	// SecretSourceDir represents a location where Secrets are attached to the pod
SecretSourceDir = filepath.Join(mountBaseDir, "secret")
// DownwardAPISourceDir represents a location where downwardapi is attached to the pod
DownwardAPISourceDir = filepath.Join(mountBaseDir, "downwardapi")
// ServiceAccountSourceDir represents the location where the ServiceAccount token is attached to the pod
ServiceAccountSourceDir = "/var/run/secrets/kubernetes.io/serviceaccount/"
// ConfigMapDisksDir represents a path to ConfigMap iso images
ConfigMapDisksDir = filepath.Join(mountBaseDir, "config-map-disks")
// SecretDisksDir represents a path to Secrets iso images
SecretDisksDir = filepath.Join(mountBaseDir, "secret-disks")
// SysprepDisksDir represents a path to Syspreps iso images
SysprepDisksDir = filepath.Join(mountBaseDir, "sysprep-disks")
// DownwardAPIDisksDir represents a path to DownwardAPI iso images
DownwardAPIDisksDir = filepath.Join(mountBaseDir, "downwardapi-disks")
// DownwardMetricDisksDir represents a path to DownwardMetric block disk
DownwardMetricDisksDir = filepath.Join(mountBaseDir, "downwardmetric-disk")
	// DownwardMetricDisk represents the disk location for the DownwardMetric disk
DownwardMetricDisk = filepath.Join(DownwardAPIDisksDir, "vhostmd0")
// ServiceAccountDiskDir represents a path to the ServiceAccount iso image
ServiceAccountDiskDir = filepath.Join(mountBaseDir, "service-account-disk")
// ServiceAccountDiskName represents the name of the ServiceAccount iso image
ServiceAccountDiskName = "service-account.iso"
createISOImage = defaultCreateIsoImage
createEmptyISOImage = defaultCreateEmptyIsoImage
)
// The unit test suite uses this function
func setIsoCreationFunction(isoFunc isoCreationFunc) {
createISOImage = isoFunc
}
func getFilesLayout(dirPath string) ([]string, error) {
var filesPath []string
files, err := os.ReadDir(dirPath)
if err != nil {
return nil, err
}
for _, file := range files {
fileName := file.Name()
filesPath = append(filesPath, fileName+"="+filepath.Join(dirPath, fileName))
}
return filesPath, nil
}
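// Layout sketch: for a source directory containing "token" and "ca.crt",
// getFilesLayout returns entries like
//
//	token=<dirPath>/token
//	ca.crt=<dirPath>/ca.crt
//
// which xorrisofs maps via -graft-points to top-level files on the ISO.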
func defaultCreateIsoImage(iso string, volID string, files []string) error {
if volID == "" {
volID = "cfgdata"
}
isoStaging := fmt.Sprintf("%s.staging", iso)
	args := []string{
		"-output", isoStaging,
		"-follow-links",
		"-volid", volID,
		"-joliet",
		"-rock",
		"-graft-points",
		"-partition_cyl_align", "on",
	}
	args = append(args, files...)
isoBinary := "xorrisofs"
// #nosec No risk for attacker injection. Parameters are predefined strings
cmd := exec.Command(isoBinary, args...)
err := cmd.Run()
if err != nil {
return err
}
err = os.Rename(isoStaging, iso)
return err
}
func defaultCreateEmptyIsoImage(iso string, size int64) error {
isoStaging := fmt.Sprintf("%s.staging", iso)
f, err := os.Create(isoStaging)
if err != nil {
return fmt.Errorf("failed to create empty iso: '%s'", isoStaging)
}
err = util.WriteBytes(f, 0, size)
if err != nil {
return err
}
util.CloseIOAndCheckErr(f, &err)
if err != nil {
return err
}
err = os.Rename(isoStaging, iso)
return err
}
func createIsoConfigImage(output string, volID string, files []string, size int64) error {
var err error
if size == 0 {
err = createISOImage(output, volID, files)
} else {
err = createEmptyISOImage(output, size)
}
if err != nil {
return err
}
return nil
}
func findIsoSize(vmi *v1.VirtualMachineInstance, volume *v1.Volume, emptyIso bool) (int64, error) {
if emptyIso {
for _, vs := range vmi.Status.VolumeStatus {
if vs.Name == volume.Name {
return vs.Size, nil
}
}
return 0, fmt.Errorf("failed to find the status of volume %s", volume.Name)
}
return 0, nil
}
type volumeInfo interface {
isValidType(*v1.Volume) bool
getSourcePath(*v1.Volume) string
getIsoPath(*v1.Volume) string
getLabel(*v1.Volume) string
}
func createIsoDisksForConfigVolumes(vmi *v1.VirtualMachineInstance, emptyIso bool, info volumeInfo) error {
volumes := make(map[string]v1.Volume)
for _, volume := range vmi.Spec.Volumes {
if info.isValidType(&volume) {
volumes[volume.Name] = volume
}
}
for _, disk := range vmi.Spec.Domain.Devices.Disks {
volume, ok := volumes[disk.Name]
if !ok {
continue
}
filesPath, err := getFilesLayout(info.getSourcePath(&volume))
if err != nil {
return err
}
isoPath := info.getIsoPath(&volume)
vmiIsoSize, err := findIsoSize(vmi, &volume, emptyIso)
if err != nil {
return err
}
label := info.getLabel(&volume)
if err := createIsoConfigImage(isoPath, label, filesPath, vmiIsoSize); err != nil {
return err
}
if err := ephemeraldiskutils.DefaultOwnershipManager.UnsafeSetFileOwnership(isoPath); err != nil {
return err
}
}
return nil
}
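// Extension sketch (hypothetical type): a new config volume kind only needs a
// volumeInfo implementation, mirroring the ConfigMap, Secret and DownwardAPI
// ones in this package.
//
//	type myVolumeInfo struct{}
//	// implement isValidType, getSourcePath, getIsoPath and getLabel ...
//	err := createIsoDisksForConfigVolumes(vmi, false, myVolumeInfo{})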
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package config
import (
"path/filepath"
v1 "kubevirt.io/api/core/v1"
)
// GetDownwardAPISourcePath returns a path to downwardAPI mounted on a pod
func GetDownwardAPISourcePath(volumeName string) string {
return filepath.Join(DownwardAPISourceDir, volumeName)
}
// GetDownwardAPIDiskPath returns a path to downwardAPI iso image created based on volume name
func GetDownwardAPIDiskPath(volumeName string) string {
return filepath.Join(DownwardAPIDisksDir, volumeName+".iso")
}
type downwardAPIVolumeInfo struct{}
func (i downwardAPIVolumeInfo) isValidType(v *v1.Volume) bool {
return v.DownwardAPI != nil
}
func (i downwardAPIVolumeInfo) getSourcePath(v *v1.Volume) string {
return GetDownwardAPISourcePath(v.Name)
}
func (i downwardAPIVolumeInfo) getIsoPath(v *v1.Volume) string {
return GetDownwardAPIDiskPath(v.Name)
}
func (i downwardAPIVolumeInfo) getLabel(v *v1.Volume) string {
return v.DownwardAPI.VolumeLabel
}
// CreateDownwardAPIDisks creates DownwardAPI iso disks which are attached to vmis
func CreateDownwardAPIDisks(vmi *v1.VirtualMachineInstance, emptyIso bool) error {
return createIsoDisksForConfigVolumes(vmi, emptyIso, downwardAPIVolumeInfo{})
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package config
import (
"path/filepath"
v1 "kubevirt.io/api/core/v1"
)
// GetSecretSourcePath returns a path to Secret mounted on a pod
func GetSecretSourcePath(volumeName string) string {
return filepath.Join(SecretSourceDir, volumeName)
}
// GetSecretDiskPath returns a path to Secret iso image created based on volume name
func GetSecretDiskPath(volumeName string) string {
return filepath.Join(SecretDisksDir, volumeName+".iso")
}
type secretVolumeInfo struct{}
func (i secretVolumeInfo) isValidType(v *v1.Volume) bool {
return v.Secret != nil
}
func (i secretVolumeInfo) getSourcePath(v *v1.Volume) string {
return GetSecretSourcePath(v.Name)
}
func (i secretVolumeInfo) getIsoPath(v *v1.Volume) string {
return GetSecretDiskPath(v.Name)
}
func (i secretVolumeInfo) getLabel(v *v1.Volume) string {
return v.Secret.VolumeLabel
}
// CreateSecretDisks creates Secret iso disks which are attached to vmis
func CreateSecretDisks(vmi *v1.VirtualMachineInstance, emptyIso bool) error {
return createIsoDisksForConfigVolumes(vmi, emptyIso, secretVolumeInfo{})
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package config
import (
"path/filepath"
v1 "kubevirt.io/api/core/v1"
)
// GetServiceAccountDiskPath returns a path to the ServiceAccount iso image
func GetServiceAccountDiskPath() string {
return filepath.Join(ServiceAccountDiskDir, ServiceAccountDiskName)
}
type serviceAccountVolumeInfo struct{}
func (i serviceAccountVolumeInfo) isValidType(v *v1.Volume) bool {
return v.ServiceAccount != nil
}
func (i serviceAccountVolumeInfo) getSourcePath(v *v1.Volume) string {
return ServiceAccountSourceDir
}
func (i serviceAccountVolumeInfo) getIsoPath(v *v1.Volume) string {
return GetServiceAccountDiskPath()
}
func (i serviceAccountVolumeInfo) getLabel(v *v1.Volume) string {
return ""
}
// CreateServiceAccountDisk creates the ServiceAccount iso disk which is attached to vmis
func CreateServiceAccountDisk(vmi *v1.VirtualMachineInstance, emptyIso bool) error {
return createIsoDisksForConfigVolumes(vmi, emptyIso, serviceAccountVolumeInfo{})
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package config
import (
"fmt"
"os"
"path/filepath"
"strings"
v1 "kubevirt.io/api/core/v1"
ephemeraldiskutils "kubevirt.io/kubevirt/pkg/ephemeral-disk-utils"
)
// Assuming Windows does not care what the exact label is.
var sysprepVolumeLabel = "unattendCD"
// GetSysprepSourcePath returns a path to the Sysprep volume mounted on a pod
func GetSysprepSourcePath(volumeName string) string {
return filepath.Join(SysprepSourceDir, volumeName)
}
// GetSysprepDiskPath returns a path to the Sysprep iso image created based on a volume name
func GetSysprepDiskPath(volumeName string) string {
return filepath.Join(SysprepDisksDir, volumeName+".iso")
}
func sysprepVolumeHasContents(sysprepVolume *v1.SysprepSource) bool {
return sysprepVolume.ConfigMap != nil || sysprepVolume.Secret != nil
}
// Explained here: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/windows-setup-automation-overview
const autounattendFilename = "autounattend.xml"
const unattendFilename = "unattend.xml"
func validateUnattendPresence(dirPath string) error {
files, err := os.ReadDir(dirPath)
if err != nil {
return fmt.Errorf("Error validating that %s or %s have been provided: %w", autounattendFilename, unattendFilename, err)
}
for _, file := range files {
if f := strings.ToLower(file.Name()); f == autounattendFilename || f == unattendFilename {
return nil
}
}
return fmt.Errorf("Sysprep drive should contain %s or %s but neither were found.", autounattendFilename, unattendFilename)
}
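// Note that the check above lower-cases file names before comparing, so a
// ConfigMap or Secret entry named e.g. "Autounattend.XML" also satisfies it.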
// CreateSysprepDisks creates Sysprep iso disks which are attached to vmis from either ConfigMap or Secret as a source
func CreateSysprepDisks(vmi *v1.VirtualMachineInstance, emptyIso bool) error {
for _, volume := range vmi.Spec.Volumes {
if !shouldCreateSysprepDisk(volume.Sysprep) {
continue
}
vmiIsoSize, err := findIsoSize(vmi, &volume, emptyIso)
if err != nil {
return err
}
if err := createSysprepDisk(volume.Name, vmiIsoSize); err != nil {
return err
}
}
return nil
}
func shouldCreateSysprepDisk(volumeSysprep *v1.SysprepSource) bool {
return volumeSysprep != nil && sysprepVolumeHasContents(volumeSysprep)
}
func createSysprepDisk(volumeName string, size int64) error {
sysprepSourcePath := GetSysprepSourcePath(volumeName)
if err := validateUnattendPresence(sysprepSourcePath); err != nil {
return err
}
filesPath, err := getFilesLayout(sysprepSourcePath)
if err != nil {
return err
}
return createIsoImageAndSetFileOwnership(volumeName, filesPath, size)
}
func createIsoImageAndSetFileOwnership(volumeName string, filesPath []string, size int64) error {
disk := GetSysprepDiskPath(volumeName)
if err := createIsoConfigImage(disk, sysprepVolumeLabel, filesPath, size); err != nil {
return err
}
if err := ephemeraldiskutils.DefaultOwnershipManager.UnsafeSetFileOwnership(disk); err != nil {
return err
}
return nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package containerdisk
import (
"fmt"
"os"
"path"
"path/filepath"
"regexp"
"slices"
"strconv"
"strings"
kubev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"kubevirt.io/kubevirt/pkg/os/disk"
"kubevirt.io/kubevirt/pkg/safepath"
ephemeraldisk "kubevirt.io/kubevirt/pkg/ephemeral-disk"
v1 "kubevirt.io/api/core/v1"
diskutils "kubevirt.io/kubevirt/pkg/ephemeral-disk-utils"
"kubevirt.io/kubevirt/pkg/pointer"
"kubevirt.io/kubevirt/pkg/util"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
var podsBaseDir = util.KubeletPodsDir
var mountBaseDir = filepath.Join(util.VirtShareDir, "/container-disks")
type SocketPathGetter func(vmi *v1.VirtualMachineInstance, volumeIndex int) (string, error)
type KernelBootSocketPathGetter func(vmi *v1.VirtualMachineInstance) (string, error)
const LauncherVolume = "launcher-volume"
const KernelBootName = "kernel-boot"
const KernelBootVolumeName = KernelBootName + "-volume"
const ephemeralStorageOverheadSize = "50M"
var digestRegex = regexp.MustCompile(`sha256:([a-zA-Z0-9]+)`)
func GetLegacyVolumeMountDirOnHost(vmi *v1.VirtualMachineInstance) string {
return filepath.Join(mountBaseDir, string(vmi.UID))
}
func GetVolumeMountDirOnGuest(vmi *v1.VirtualMachineInstance) string {
return filepath.Join(mountBaseDir, string(vmi.UID))
}
func GetVolumeMountDirOnHost(vmi *v1.VirtualMachineInstance) (*safepath.Path, error) {
basepath := ""
foundEntries := 0
foundBasepath := ""
for podUID := range vmi.Status.ActivePods {
basepath = fmt.Sprintf("%s/%s/volumes/kubernetes.io~empty-dir/container-disks", podsBaseDir, string(podUID))
exists, err := diskutils.FileExists(basepath)
if err != nil {
return nil, err
} else if exists {
foundEntries++
foundBasepath = basepath
}
}
if foundEntries == 1 {
return safepath.JoinAndResolveWithRelativeRoot("/", foundBasepath)
} else if foundEntries > 1 {
// Don't mount until outdated pod environments are removed
// otherwise we might stomp on a previous cleanup
return nil, fmt.Errorf("Found multiple pods active for vmi %s/%s. Waiting on outdated pod directories to be removed", vmi.Namespace, vmi.Name)
}
return nil, os.ErrNotExist
}
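// Assuming the default kubelet pods directory, the path resolved above has
// the following shape (sample pod UID, sketch only):
//
//	/var/lib/kubelet/pods/<podUID>/volumes/kubernetes.io~empty-dir/container-disks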
func GetDiskTargetDirFromHostView(vmi *v1.VirtualMachineInstance) (*safepath.Path, error) {
return GetVolumeMountDirOnHost(vmi)
}
func GetDiskTargetName(volumeIndex int) string {
return fmt.Sprintf("disk_%d.img", volumeIndex)
}
func GetDiskTargetPathFromLauncherView(volumeIndex int) string {
return filepath.Join(mountBaseDir, GetDiskTargetName(volumeIndex))
}
func GetKernelBootArtifactPathFromLauncherView(artifact string) string {
artifactBase := filepath.Base(artifact)
return filepath.Join(mountBaseDir, KernelBootName, artifactBase)
}
// SetLocalDirectoryOnly TODO: Refactor this package. This package is used by virt-controller
// to set proper paths on the virt-launcher template and by virt-launcher to create directories
// at the right location. The functions have side-effects and mix path setting and creation
// which makes it hard to differentiate the usage per component.
func SetLocalDirectoryOnly(dir string) {
mountBaseDir = dir
}
func SetLocalDirectory(dir string) error {
SetLocalDirectoryOnly(dir)
return os.MkdirAll(dir, 0750)
}
func SetKubeletPodsDirectory(dir string) {
podsBaseDir = dir
}
// used for testing - we don't want to MkdirAll on a production host mount
func setPodsDirectory(dir string) error {
podsBaseDir = dir
return os.MkdirAll(dir, 0750)
}
// NewSocketPathGetter gets the socket path of a containerDisk. For testing, a baseDir
// can be provided which can, for instance, point to /tmp.
func NewSocketPathGetter(baseDir string) SocketPathGetter {
return func(vmi *v1.VirtualMachineInstance, volumeIndex int) (string, error) {
for podUID := range vmi.Status.ActivePods {
basePath := getContainerDiskSocketBasePath(baseDir, string(podUID))
socketPath := filepath.Join(basePath, fmt.Sprintf("disk_%d.sock", volumeIndex))
exists, _ := diskutils.FileExists(socketPath)
if exists {
return socketPath, nil
}
}
return "", fmt.Errorf("container disk socket path not found for vmi \"%s\"", vmi.Name)
}
}
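// Hypothetical usage sketch, e.g. from a test with baseDir pointing at /tmp:
//
//	getSocket := NewSocketPathGetter("/tmp")
//	socket, err := getSocket(vmi, 0)
//	// socket == "/tmp/pods/<podUID>/volumes/kubernetes.io~empty-dir/container-disks/disk_0.sock"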
// NewKernelBootSocketPathGetter gets the socket path of the kernel-boot containerDisk. For testing, a baseDir
// can be provided which can, for instance, point to /tmp.
func NewKernelBootSocketPathGetter(baseDir string) KernelBootSocketPathGetter {
return func(vmi *v1.VirtualMachineInstance) (string, error) {
for podUID := range vmi.Status.ActivePods {
basePath := getContainerDiskSocketBasePath(baseDir, string(podUID))
socketPath := filepath.Join(basePath, KernelBootName+".sock")
exists, _ := diskutils.FileExists(socketPath)
if exists {
return socketPath, nil
}
}
return "", fmt.Errorf("kernel boot socket path not found for vmi \"%s\"", vmi.Name)
}
}
func GetImage(root *safepath.Path, imagePath string) (*safepath.Path, error) {
if imagePath != "" {
var err error
resolvedPath, err := root.AppendAndResolveWithRelativeRoot(imagePath)
if err != nil {
return nil, fmt.Errorf("failed to determine custom image path %s: %v", imagePath, err)
}
return resolvedPath, nil
} else {
fallbackPath, err := root.AppendAndResolveWithRelativeRoot(disk.DiskSourceFallbackPath)
if err != nil {
return nil, fmt.Errorf("failed to determine default image path %v: %v", fallbackPath, err)
}
var files []os.DirEntry
err = fallbackPath.ExecuteNoFollow(func(safePath string) (err error) {
files, err = os.ReadDir(safePath)
return err
})
if err != nil {
return nil, fmt.Errorf("failed to check default image path %s: %v", fallbackPath, err)
}
if len(files) == 0 {
return nil, fmt.Errorf("no file found in folder %s, no disk present", disk.DiskSourceFallbackPath)
} else if len(files) > 1 {
return nil, fmt.Errorf("more than one file found in folder %s, only one disk is allowed", disk.DiskSourceFallbackPath)
}
fileName := files[0].Name()
resolvedPath, err := root.AppendAndResolveWithRelativeRoot(disk.DiskSourceFallbackPath, fileName)
if err != nil {
return nil, fmt.Errorf("failed to check default image path %s: %v", imagePath, err)
}
return resolvedPath, nil
}
}
func GenerateInitContainers(vmi *v1.VirtualMachineInstance, config *virtconfig.ClusterConfig, imageIDs map[string]string, podVolumeName string, binVolumeName string) []kubev1.Container {
return generateContainersHelper(vmi, config, imageIDs, podVolumeName, binVolumeName, true)
}
func GenerateContainers(vmi *v1.VirtualMachineInstance, config *virtconfig.ClusterConfig, imageIDs map[string]string, podVolumeName string, binVolumeName string) []kubev1.Container {
return generateContainersHelper(vmi, config, imageIDs, podVolumeName, binVolumeName, false)
}
func GenerateKernelBootContainer(vmi *v1.VirtualMachineInstance, config *virtconfig.ClusterConfig, imageIDs map[string]string, podVolumeName string, binVolumeName string) *kubev1.Container {
return generateKernelBootContainerHelper(vmi, config, imageIDs, podVolumeName, binVolumeName, false)
}
func GenerateKernelBootInitContainer(vmi *v1.VirtualMachineInstance, config *virtconfig.ClusterConfig, imageIDs map[string]string, podVolumeName string, binVolumeName string) *kubev1.Container {
return generateKernelBootContainerHelper(vmi, config, imageIDs, podVolumeName, binVolumeName, true)
}
func generateKernelBootContainerHelper(vmi *v1.VirtualMachineInstance, config *virtconfig.ClusterConfig, imageIDs map[string]string, podVolumeName string, binVolumeName string, isInit bool) *kubev1.Container {
if !util.HasKernelBootContainerImage(vmi) {
return nil
}
kernelBootContainer := vmi.Spec.Domain.Firmware.KernelBoot.Container
kernelBootVolume := v1.Volume{
Name: KernelBootVolumeName,
VolumeSource: v1.VolumeSource{
ContainerDisk: &v1.ContainerDiskSource{
Image: kernelBootContainer.Image,
ImagePullSecret: kernelBootContainer.ImagePullSecret,
Path: "/",
ImagePullPolicy: kernelBootContainer.ImagePullPolicy,
},
},
}
const fakeVolumeIdx = 0 // volume index makes no difference for kernel-boot container
return generateContainerFromVolume(vmi, config, imageIDs, podVolumeName, binVolumeName, isInit, true, &kernelBootVolume, fakeVolumeIdx)
}
// The controller uses this function to generate the container
// specs for hosting the container registry disks.
func generateContainersHelper(vmi *v1.VirtualMachineInstance, config *virtconfig.ClusterConfig, imageIDs map[string]string, podVolumeName string, binVolumeName string, isInit bool) []kubev1.Container {
var containers []kubev1.Container
// Make VirtualMachineInstance Image Wrapper Containers
for index, volume := range vmi.Spec.Volumes {
if volume.Name == KernelBootVolumeName {
continue
}
if container := generateContainerFromVolume(vmi, config, imageIDs, podVolumeName, binVolumeName, isInit, false, &volume, index); container != nil {
containers = append(containers, *container)
}
}
return containers
}
func generateContainerFromVolume(vmi *v1.VirtualMachineInstance, config *virtconfig.ClusterConfig, imageIDs map[string]string, podVolumeName, binVolumeName string, isInit, isKernelBoot bool, volume *v1.Volume, volumeIdx int) *kubev1.Container {
if volume.ContainerDisk == nil {
return nil
}
volumeMountDir := GetVolumeMountDirOnGuest(vmi)
diskContainerName := toContainerName(volume.Name)
diskContainerImage := volume.ContainerDisk.Image
if img, exists := imageIDs[volume.Name]; exists {
diskContainerImage = img
}
resources := getMinimalInitContainerDiskResources(vmi, config)
var mountedDiskName string
if isKernelBoot {
mountedDiskName = KernelBootName
} else {
mountedDiskName = "disk_" + strconv.Itoa(volumeIdx)
}
var args []string
var name string
if isInit {
name = diskContainerName + "-init"
args = []string{"--no-op"}
} else {
name = diskContainerName
copyPathArg := path.Join(volumeMountDir, mountedDiskName)
args = []string{"--copy-path", copyPathArg}
}
noPrivilegeEscalation := false
nonRoot := true
var userId int64 = util.NonRootUID
container := &kubev1.Container{
Name: name,
Image: diskContainerImage,
ImagePullPolicy: volume.ContainerDisk.ImagePullPolicy,
Command: []string{"/usr/bin/container-disk"},
Args: args,
VolumeMounts: []kubev1.VolumeMount{
{
Name: podVolumeName,
MountPath: volumeMountDir,
},
{
Name: binVolumeName,
MountPath: "/usr/bin",
},
},
Resources: resources,
SecurityContext: &kubev1.SecurityContext{
RunAsUser: &userId,
RunAsNonRoot: &nonRoot,
AllowPrivilegeEscalation: &noPrivilegeEscalation,
Capabilities: &kubev1.Capabilities{
Drop: []kubev1.Capability{"ALL"},
},
},
}
return container
}
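// For a volume named "rootdisk" (hypothetical) the helper above produces a
// container named "volumerootdisk" running with "--copy-path", or
// "volumerootdisk-init" running with "--no-op" when isInit is set.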
func CreateEphemeralImages(
vmi *v1.VirtualMachineInstance,
diskCreator ephemeraldisk.EphemeralDiskCreatorInterface,
disksInfo map[string]*disk.DiskInfo,
) error {
// The domain is set up to use the COW image instead of the base image. All we have
// to do here is create the image where the domain expects it (GetDiskTargetPathFromLauncherView)
// for each disk that requires it.
for i, volume := range vmi.Spec.Volumes {
if volume.VolumeSource.ContainerDisk != nil {
info := disksInfo[volume.Name]
if info == nil {
return fmt.Errorf("no disk info provided for volume %s", volume.Name)
}
backingFile := GetDiskTargetPathFromLauncherView(i)
exists, err := diskutils.FileExists(backingFile)
if err != nil {
return err
} else if !exists {
return fmt.Errorf("no supported file disk found for volume found in: %s", backingFile)
}
if err := diskCreator.CreateBackedImageForVolume(volume, backingFile, info.Format); err != nil {
return err
}
}
}
return nil
}
func getContainerDiskSocketBasePath(baseDir, podUID string) string {
return fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~empty-dir/container-disks", baseDir, podUID)
}
// ExtractImageIDsFromSourcePod takes the VMI and its source pod to determine the exact images used by
// containerdisks and kernel-boot containers, as recorded in the status section of a started pod
func ExtractImageIDsFromSourcePod(vmi *v1.VirtualMachineInstance, sourcePod *kubev1.Pod, imageVolumeEnabled bool) (imageIDs map[string]string, err error) {
imageIDs = map[string]string{}
for _, volume := range vmi.Spec.Volumes {
if volume.ContainerDisk == nil {
continue
}
imageIDs[volume.Name] = volume.ContainerDisk.Image
}
if util.HasKernelBootContainerImage(vmi) {
imageIDs[KernelBootVolumeName] = vmi.Spec.Domain.Firmware.KernelBoot.Container.Image
}
containersToCheck := slices.Clone(sourcePod.Status.ContainerStatuses)
if imageVolumeEnabled {
// When imageVolume is Enabled, there are two cases:
// 1. First migration: the Pod was created before the digest was known, so the volumes do not include it.
// We include the initContainerStatuses in containersToCheck so that the loop below can extract the digest
// from the statuses and update imageIDs (similar to how we handle standard containerDisk sidecars when imageVolume is disabled).
// 2. Subsequent migrations: the digest is already available in the Pod volumes.
// The init containers no longer exist at this point and are not needed; we take the full image including the digest directly from the volumes.
//
// TODO: Once the KEP https://github.com/kubernetes/enhancements/issues/5365 is fully implemented and stable
// in all Kubernetes versions supported by KubeVirt, this entire init containers logic should be removed,
// and the digest can be fetched directly from the Pod volume status.
containersToCheck = append(containersToCheck, sourcePod.Status.InitContainerStatuses...)
for _, vol := range sourcePod.Spec.Volumes {
_, isContainerDiskVolume := imageIDs[vol.Name]
if vol.Image == nil || !strings.Contains(vol.Image.Reference, "@sha256:") || !isContainerDiskVolume {
continue
}
imageIDs[vol.Name] = vol.Image.Reference
}
}
for _, status := range containersToCheck {
if !isImageVolume(status.Name) {
continue
}
key := toVolumeName(status.Name)
image, exists := imageIDs[key]
if !exists {
continue
}
imageID, err := toImageWithDigest(image, status.ImageID)
if err != nil {
return nil, err
}
imageIDs[key] = imageID
}
return
}
func toImageWithDigest(image string, imageID string) (string, error) {
baseImage := image
if strings.LastIndex(image, "@sha256:") != -1 {
baseImage = strings.Split(image, "@sha256:")[0]
} else if colonIndex := strings.LastIndex(image, ":"); colonIndex > strings.LastIndex(image, "/") {
baseImage = image[:colonIndex]
}
digestMatches := digestRegex.FindStringSubmatch(imageID)
if len(digestMatches) < 2 {
return "", fmt.Errorf("failed to identify image digest for container %q with id %q", image, imageID)
}
return fmt.Sprintf("%s@sha256:%s", baseImage, digestMatches[1]), nil
}
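// A sketch of the transformation with hypothetical names: given the image
// "registry.example/disk:v1" and a status imageID containing
// "sha256:0123abcd...", the result is "registry.example/disk@sha256:0123abcd...",
// i.e. the tag is replaced by the digest reported in the pod status.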
func isImageVolume(containerName string) bool {
return strings.HasPrefix(containerName, "volume")
}
func toContainerName(volumeName string) string {
return fmt.Sprintf("volume%s", volumeName)
}
func toVolumeName(containerName string) string {
return strings.TrimPrefix(containerName, "volume")
}
// getMinimalInitContainerDiskResources calculates resource requirements for container disk containers
// This function is shared between init containers and regular containers
func getMinimalInitContainerDiskResources(vmi *v1.VirtualMachineInstance, config *virtconfig.ClusterConfig) kubev1.ResourceRequirements {
resources := kubev1.ResourceRequirements{}
resources.Requests = make(kubev1.ResourceList)
resources.Limits = make(kubev1.ResourceList)
resources.Requests[kubev1.ResourceCPU] = resource.MustParse("1m")
if cpuRequest := config.GetSupportContainerRequest(v1.ContainerDisk, kubev1.ResourceCPU); cpuRequest != nil {
resources.Requests[kubev1.ResourceCPU] = *cpuRequest
}
resources.Requests[kubev1.ResourceMemory] = resource.MustParse("1M")
if memRequest := config.GetSupportContainerRequest(v1.ContainerDisk, kubev1.ResourceMemory); memRequest != nil {
resources.Requests[kubev1.ResourceMemory] = *memRequest
}
resources.Requests[kubev1.ResourceEphemeralStorage] = resource.MustParse(ephemeralStorageOverheadSize)
resources.Limits[kubev1.ResourceCPU] = resource.MustParse("10m")
if cpuLimit := config.GetSupportContainerLimit(v1.ContainerDisk, kubev1.ResourceCPU); cpuLimit != nil {
resources.Limits[kubev1.ResourceCPU] = *cpuLimit
}
resources.Limits[kubev1.ResourceMemory] = resource.MustParse("40M")
if memLimit := config.GetSupportContainerLimit(v1.ContainerDisk, kubev1.ResourceMemory); memLimit != nil {
resources.Limits[kubev1.ResourceMemory] = *memLimit
}
if vmi.IsCPUDedicated() || vmi.WantsToHaveQOSGuaranteed() {
resources.Requests[kubev1.ResourceCPU] = resources.Limits[kubev1.ResourceCPU]
resources.Requests[kubev1.ResourceMemory] = resources.Limits[kubev1.ResourceMemory]
}
return resources
}
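// Summarizing the defaults above: requests of 1m CPU / 1M memory plus 50M of
// ephemeral storage, and limits of 10m CPU / 40M memory; each value can be
// overridden through the cluster's support-container configuration, and
// requests are raised to the limits for dedicated-CPU or guaranteed-QoS VMIs
// so the launcher pod can keep a Guaranteed QoS class.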
// CreateImageVolumeInitContainer creates a single init container for ImageVolume feature
func CreateImageVolumeInitContainer(vmi *v1.VirtualMachineInstance, config *virtconfig.ClusterConfig, name, image string, imagePullPolicy kubev1.PullPolicy) kubev1.Container {
resources := getMinimalInitContainerDiskResources(vmi, config)
return kubev1.Container{
Name: fmt.Sprintf("volume%s", name),
Image: image,
ImagePullPolicy: imagePullPolicy,
Command: []string{filepath.Join(util.ContainerBinary, "/usr/bin/container-disk")},
Args: []string{"--no-op"},
Resources: resources,
SecurityContext: &kubev1.SecurityContext{
RunAsUser: pointer.P(int64(util.NonRootUID)),
RunAsNonRoot: pointer.P(true),
AllowPrivilegeEscalation: pointer.P(false),
Capabilities: &kubev1.Capabilities{
Drop: []kubev1.Capability{"ALL"},
},
},
VolumeMounts: []kubev1.VolumeMount{{
Name: LauncherVolume,
MountPath: util.ContainerBinary,
ReadOnly: true,
}},
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package controller
import (
k8sv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "kubevirt.io/api/core/v1"
poolv1 "kubevirt.io/api/pool/v1beta1"
)
type VirtualMachinePoolConditionManager struct {
}
func NewVirtualMachinePoolConditionManager() *VirtualMachinePoolConditionManager {
return &VirtualMachinePoolConditionManager{}
}
func (d *VirtualMachinePoolConditionManager) GetCondition(pool *poolv1.VirtualMachinePool, cond poolv1.VirtualMachinePoolConditionType) *poolv1.VirtualMachinePoolCondition {
if pool == nil {
return nil
}
for _, c := range pool.Status.Conditions {
if c.Type == cond {
return &c
}
}
return nil
}
func (d *VirtualMachinePoolConditionManager) HasCondition(pool *poolv1.VirtualMachinePool, cond poolv1.VirtualMachinePoolConditionType) bool {
return d.GetCondition(pool, cond) != nil
}
func (d *VirtualMachinePoolConditionManager) RemoveCondition(pool *poolv1.VirtualMachinePool, cond poolv1.VirtualMachinePoolConditionType) {
var conds []poolv1.VirtualMachinePoolCondition
for _, c := range pool.Status.Conditions {
if c.Type == cond {
continue
}
conds = append(conds, c)
}
pool.Status.Conditions = conds
}
// UpdateCondition updates the given VirtualMachinePoolCondition, unless it is already set with the same status and reason.
func (d *VirtualMachinePoolConditionManager) UpdateCondition(pool *poolv1.VirtualMachinePool, cond *poolv1.VirtualMachinePoolCondition) {
for i, c := range pool.Status.Conditions {
if c.Type != cond.Type {
continue
}
if c.Status != cond.Status || c.Reason != cond.Reason {
pool.Status.Conditions[i] = *cond
}
return
}
pool.Status.Conditions = append(pool.Status.Conditions, *cond)
}
type VirtualMachineConditionManager struct {
}
func NewVirtualMachineConditionManager() *VirtualMachineConditionManager {
return &VirtualMachineConditionManager{}
}
func (d *VirtualMachineConditionManager) GetCondition(vm *v1.VirtualMachine, cond v1.VirtualMachineConditionType) *v1.VirtualMachineCondition {
if vm == nil {
return nil
}
for _, c := range vm.Status.Conditions {
if c.Type == cond {
return &c
}
}
return nil
}
func (d *VirtualMachineConditionManager) HasCondition(vm *v1.VirtualMachine, cond v1.VirtualMachineConditionType) bool {
return d.GetCondition(vm, cond) != nil
}
func (d *VirtualMachineConditionManager) HasConditionWithStatus(vm *v1.VirtualMachine, cond v1.VirtualMachineConditionType, status k8sv1.ConditionStatus) bool {
c := d.GetCondition(vm, cond)
return c != nil && c.Status == status
}
func (d *VirtualMachineConditionManager) RemoveCondition(vm *v1.VirtualMachine, cond v1.VirtualMachineConditionType) {
var conds []v1.VirtualMachineCondition
for _, c := range vm.Status.Conditions {
if c.Type == cond {
continue
}
conds = append(conds, c)
}
vm.Status.Conditions = conds
}
type VirtualMachineInstanceConditionManager struct {
}
func NewVirtualMachineInstanceConditionManager() *VirtualMachineInstanceConditionManager {
return &VirtualMachineInstanceConditionManager{}
}
// UpdateCondition updates the given VirtualMachineCondition, unless it is already set with the same status and reason.
func (d *VirtualMachineConditionManager) UpdateCondition(vm *v1.VirtualMachine, cond *v1.VirtualMachineCondition) {
for i, c := range vm.Status.Conditions {
if c.Type != cond.Type {
continue
}
if c.Status != cond.Status || c.Reason != cond.Reason {
vm.Status.Conditions[i] = *cond
}
return
}
vm.Status.Conditions = append(vm.Status.Conditions, *cond)
}
func (d *VirtualMachineInstanceConditionManager) CheckFailure(vmi *v1.VirtualMachineInstance, syncErr error, reason string) (changed bool) {
if syncErr != nil {
if d.HasConditionWithStatusAndReason(vmi, v1.VirtualMachineInstanceSynchronized, k8sv1.ConditionFalse, reason) {
return false
}
if d.HasCondition(vmi, v1.VirtualMachineInstanceSynchronized) {
d.RemoveCondition(vmi, v1.VirtualMachineInstanceSynchronized)
}
vmi.Status.Conditions = append(vmi.Status.Conditions, v1.VirtualMachineInstanceCondition{
Type: v1.VirtualMachineInstanceSynchronized,
Reason: reason,
Message: syncErr.Error(),
LastTransitionTime: metav1.Now(),
Status: k8sv1.ConditionFalse,
})
return true
} else if d.HasCondition(vmi, v1.VirtualMachineInstanceSynchronized) {
d.RemoveCondition(vmi, v1.VirtualMachineInstanceSynchronized)
return true
}
return false
}
func (d *VirtualMachineInstanceConditionManager) GetCondition(vmi *v1.VirtualMachineInstance, cond v1.VirtualMachineInstanceConditionType) *v1.VirtualMachineInstanceCondition {
if vmi == nil {
return nil
}
for _, c := range vmi.Status.Conditions {
if c.Type == cond {
return &c
}
}
return nil
}
func (d *VirtualMachineInstanceConditionManager) HasCondition(vmi *v1.VirtualMachineInstance, cond v1.VirtualMachineInstanceConditionType) bool {
return d.GetCondition(vmi, cond) != nil
}
func (d *VirtualMachineInstanceConditionManager) HasConditionWithStatus(vmi *v1.VirtualMachineInstance, cond v1.VirtualMachineInstanceConditionType, status k8sv1.ConditionStatus) bool {
c := d.GetCondition(vmi, cond)
return c != nil && c.Status == status
}
func (d *VirtualMachineInstanceConditionManager) HasConditionWithStatusAndReason(vmi *v1.VirtualMachineInstance, cond v1.VirtualMachineInstanceConditionType, status k8sv1.ConditionStatus, reason string) bool {
c := d.GetCondition(vmi, cond)
return c != nil && c.Status == status && c.Reason == reason
}
func (d *VirtualMachineInstanceConditionManager) RemoveCondition(vmi *v1.VirtualMachineInstance, cond v1.VirtualMachineInstanceConditionType) {
var conds []v1.VirtualMachineInstanceCondition
for _, c := range vmi.Status.Conditions {
if c.Type == cond {
continue
}
conds = append(conds, c)
}
vmi.Status.Conditions = conds
}
// UpdateCondition updates the given VirtualMachineInstanceCondition, unless it is already set with the same status and reason.
func (d *VirtualMachineInstanceConditionManager) UpdateCondition(vmi *v1.VirtualMachineInstance, cond *v1.VirtualMachineInstanceCondition) {
for i, c := range vmi.Status.Conditions {
if c.Type != cond.Type {
continue
}
if c.Status != cond.Status || c.Reason != cond.Reason {
vmi.Status.Conditions[i] = *cond
}
return
}
vmi.Status.Conditions = append(vmi.Status.Conditions, *cond)
}
// AddPodCondition adds the given pod condition to the VMI.
func (d *VirtualMachineInstanceConditionManager) AddPodCondition(vmi *v1.VirtualMachineInstance, cond *k8sv1.PodCondition) {
if !d.HasCondition(vmi, v1.VirtualMachineInstanceConditionType(cond.Type)) {
vmi.Status.Conditions = append(vmi.Status.Conditions, v1.VirtualMachineInstanceCondition{
LastProbeTime: cond.LastProbeTime,
LastTransitionTime: cond.LastTransitionTime,
Message: cond.Message,
Reason: cond.Reason,
Status: cond.Status,
Type: v1.VirtualMachineInstanceConditionType(cond.Type),
})
}
}
func (d *VirtualMachineInstanceConditionManager) GetPodCondition(pod *k8sv1.Pod, conditionType k8sv1.PodConditionType) *k8sv1.PodCondition {
for _, cond := range pod.Status.Conditions {
if cond.Type == conditionType {
return &cond
}
}
return nil
}
func (d *VirtualMachineInstanceConditionManager) ConditionsEqual(vmi1, vmi2 *v1.VirtualMachineInstance) bool {
if len(vmi1.Status.Conditions) != len(vmi2.Status.Conditions) {
return false
}
for _, cond1 := range vmi1.Status.Conditions {
if !d.HasConditionWithStatusAndReason(vmi2, cond1.Type, cond1.Status, cond1.Reason) {
return false
}
}
return true
}
type VirtualMachineInstanceMigrationConditionManager struct {
}
func NewVirtualMachineInstanceMigrationConditionManager() *VirtualMachineInstanceMigrationConditionManager {
return &VirtualMachineInstanceMigrationConditionManager{}
}
func (d *VirtualMachineInstanceMigrationConditionManager) HasCondition(migration *v1.VirtualMachineInstanceMigration, cond v1.VirtualMachineInstanceMigrationConditionType) bool {
for _, c := range migration.Status.Conditions {
if c.Type == cond {
return true
}
}
return false
}
func (d *VirtualMachineInstanceMigrationConditionManager) HasConditionWithStatus(migration *v1.VirtualMachineInstanceMigration, cond v1.VirtualMachineInstanceMigrationConditionType, status k8sv1.ConditionStatus) bool {
for _, c := range migration.Status.Conditions {
if c.Type == cond {
return c.Status == status
}
}
return false
}
func (d *VirtualMachineInstanceMigrationConditionManager) RemoveCondition(migration *v1.VirtualMachineInstanceMigration, cond v1.VirtualMachineInstanceMigrationConditionType) {
var conds []v1.VirtualMachineInstanceMigrationCondition
for _, c := range migration.Status.Conditions {
if c.Type == cond {
continue
}
conds = append(conds, c)
}
migration.Status.Conditions = conds
}
// UpdateCondition updates the given VirtualMachineInstanceMigrationCondition, unless it is already set with the same status and reason.
func (d *VirtualMachineInstanceMigrationConditionManager) UpdateCondition(mig *v1.VirtualMachineInstanceMigration,
cond *v1.VirtualMachineInstanceMigrationCondition) {
for i, c := range mig.Status.Conditions {
if c.Type != cond.Type {
continue
}
if c.Status != cond.Status || c.Reason != cond.Reason {
mig.Status.Conditions[i] = *cond
}
return
}
mig.Status.Conditions = append(mig.Status.Conditions, *cond)
}
type PodConditionManager struct {
}
func NewPodConditionManager() *PodConditionManager {
return &PodConditionManager{}
}
func (d *PodConditionManager) GetCondition(pod *k8sv1.Pod, cond k8sv1.PodConditionType) *k8sv1.PodCondition {
if pod == nil {
return nil
}
for _, c := range pod.Status.Conditions {
if c.Type == cond {
return &c
}
}
return nil
}
func (d *PodConditionManager) HasCondition(pod *k8sv1.Pod, cond k8sv1.PodConditionType) bool {
return d.GetCondition(pod, cond) != nil
}
func (d *PodConditionManager) HasConditionWithStatus(pod *k8sv1.Pod, cond k8sv1.PodConditionType, status k8sv1.ConditionStatus) bool {
c := d.GetCondition(pod, cond)
return c != nil && c.Status == status
}
func (d *PodConditionManager) HasConditionWithStatusAndReason(pod *k8sv1.Pod, cond k8sv1.PodConditionType, status k8sv1.ConditionStatus, reason string) bool {
c := d.GetCondition(pod, cond)
return c != nil && c.Status == status && c.Reason == reason
}
func (d *PodConditionManager) RemoveCondition(pod *k8sv1.Pod, cond k8sv1.PodConditionType) {
var conds []k8sv1.PodCondition
for _, c := range pod.Status.Conditions {
if c.Type == cond {
continue
}
conds = append(conds, c)
}
pod.Status.Conditions = conds
}
// UpdateCondition updates the given PodCondition, unless it is already set with the same status and reason.
func (d *PodConditionManager) UpdateCondition(pod *k8sv1.Pod, cond *k8sv1.PodCondition) {
for i, c := range pod.Status.Conditions {
if c.Type != cond.Type {
continue
}
if c.Status != cond.Status || c.Reason != cond.Reason {
pod.Status.Conditions[i] = *cond
}
return
}
pod.Status.Conditions = append(pod.Status.Conditions, *cond)
}
func (d *PodConditionManager) ConditionsEqual(pod1, pod2 *k8sv1.Pod) bool {
if len(pod1.Status.Conditions) != len(pod2.Status.Conditions) {
return false
}
for _, cond1 := range pod1.Status.Conditions {
if !d.HasConditionWithStatusAndReason(pod2, cond1.Type, cond1.Status, cond1.Reason) {
return false
}
}
return true
}
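// Hypothetical usage sketch for a condition manager (shown for pods; the VM,
// VMI, pool and migration managers follow the same pattern):
//
//	mgr := NewPodConditionManager()
//	mgr.UpdateCondition(pod, &k8sv1.PodCondition{
//		Type:   k8sv1.PodReady,
//		Status: k8sv1.ConditionTrue,
//		Reason: "AllContainersReady", // illustrative reason
//	})
//	isReady := mgr.HasConditionWithStatus(pod, k8sv1.PodReady, k8sv1.ConditionTrue)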
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package controller
import (
"context"
"fmt"
"runtime/debug"
"strings"
"time"
k8sv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
)
const (
// BurstReplicas is the maximum number of requests in a row for CRUD operations on resources by controllers,
// to avoid unintentional DoS
BurstReplicas uint = 250
)
// Reasons for vmi events
const (
// FailedCreatePodReason is added in an event and in a vmi controller condition
// when a pod for a vmi controller failed to be created.
FailedCreatePodReason = "FailedCreate"
// SuccessfulCreatePodReason is added in an event when a pod for a vmi controller
// is successfully created.
SuccessfulCreatePodReason = "SuccessfulCreate"
// FailedDeletePodReason is added in an event and in a vmi controller condition
// when a pod for a vmi controller failed to be deleted.
FailedDeletePodReason = "FailedDelete"
// SuccessfulDeletePodReason is added in an event when a pod for a vmi controller
// is successfully deleted.
SuccessfulDeletePodReason = "SuccessfulDelete"
// FailedHandOverPodReason is added in an event and in a vmi controller condition
// when transferring the pod ownership from the controller to virt-handler fails.
FailedHandOverPodReason = "FailedHandOver"
// FailedBackendStorageCreateReason is added when the creation of the backend storage PVC fails.
FailedBackendStorageCreateReason = "FailedBackendStorageCreate"
// FailedBackendStorageProbeReason is added when probing the backend storage PVC fails.
FailedBackendStorageProbeReason = "FailedBackendStorageProbe"
// BackendStorageNotReadyReason is added when the backend storage PVC is pending.
BackendStorageNotReadyReason = "BackendStorageNotReady"
// SuccessfulHandOverPodReason is added in an event
// when the pod ownership transfer from the controller to virt-handler succeeds.
SuccessfulHandOverPodReason = "SuccessfulHandOver"
// FailedDataVolumeImportReason is added in an event when a dynamically generated
// dataVolume reaches the failed status phase.
FailedDataVolumeImportReason = "FailedDataVolumeImport"
// FailedGuaranteePodResourcesReason is added in an event and in a vmi controller condition
// when a pod has been created without Guaranteed resources.
FailedGuaranteePodResourcesReason = "FailedGuaranteeResources"
// FailedGatherhingClusterTopologyHints is added if the cluster topology hints can't be collected for a VMI by virt-controller
FailedGatherhingClusterTopologyHints = "FailedGatherhingClusterTopologyHints"
// FailedPvcNotFoundReason is added in an event
// when a PVC for a volume was not found.
FailedPvcNotFoundReason = "FailedPvcNotFound"
// SuccessfulMigrationReason is added when a migration attempt completes successfully
SuccessfulMigrationReason = "SuccessfulMigration"
// FailedMigrationReason is added when a migration attempt fails
FailedMigrationReason = "FailedMigration"
// SuccessfulAbortMigrationReason is added when an attempt to abort migration completes successfully
SuccessfulAbortMigrationReason = "SuccessfulAbortMigration"
// MigrationTargetPodUnschedulable is added when a migration target pod enters the Unschedulable phase
MigrationTargetPodUnschedulable = "migrationTargetPodUnschedulable"
// FailedAbortMigrationReason is added when an attempt to abort migration fails
FailedAbortMigrationReason = "FailedAbortMigration"
// UtilityVolumeMigrationPendingReason is added when a migration is pending due to utility volumes
UtilityVolumeMigrationPendingReason = "UtilityVolumeMigrationPending"
// MissingAttachmentPodReason is set when we have a hotplugged volume, but the attachment pod is missing
MissingAttachmentPodReason = "MissingAttachmentPod"
// PVCNotReadyReason is set when the PVC is not ready to be hot plugged.
PVCNotReadyReason = "PVCNotReady"
// FailedHotplugSyncReason is set when a hotplug specific failure occurs during sync
FailedHotplugSyncReason = "FailedHotplugSync"
// ErrImagePullReason is set when an error has occurred while pulling an image for a containerDisk VM volume.
ErrImagePullReason = "ErrImagePull"
// ImagePullBackOffReason is set when an error has occurred while pulling an image for a containerDisk VM volume,
// and kubelet is backing off before retrying.
ImagePullBackOffReason = "ImagePullBackOff"
// NoSuitableNodesForHostModelMigration is set when a VMI with host-model CPU mode tries to migrate but no node
// is suitable for migration (since CPU model / required features are not supported)
NoSuitableNodesForHostModelMigration = "NoSuitableNodesForHostModelMigration"
// FailedPodPatchReason is set when a pod patch error occurs during sync
FailedPodPatchReason = "FailedPodPatch"
// MigrationBackoffReason is set when an error has occurred while migrating
// and virt-controller is backing off before retrying.
MigrationBackoffReason = "MigrationBackoff"
)
// NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace, field selector and label selector.
func NewListWatchFromClient(c cache.Getter, resource string, namespace string, fieldSelector fields.Selector, labelSelector labels.Selector) *cache.ListWatch {
listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = fieldSelector.String()
options.LabelSelector = labelSelector.String()
return c.Get().
Namespace(namespace).
Resource(resource).
VersionedParams(&options, metav1.ParameterCodec).
Do(context.Background()).
Get()
}
watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = fieldSelector.String()
options.LabelSelector = labelSelector.String()
options.Watch = true
return c.Get().
Namespace(namespace).
Resource(resource).
VersionedParams(&options, metav1.ParameterCodec).
Watch(context.Background())
}
return &cache.ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
}
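// Hypothetical usage sketch, building an informer over VMIs in one namespace
// without additional filtering (client is assumed to be a kubecli.KubevirtClient):
//
//	lw := NewListWatchFromClient(client.RestClient(), "virtualmachineinstances",
//		"demo-namespace", fields.Everything(), labels.Everything())
//	informer := cache.NewSharedIndexInformer(lw, &v1.VirtualMachineInstance{}, 0, cache.Indexers{})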
func HandlePanic() {
if r := recover(); r != nil {
// Ignoring the error - there is nothing to do if logging fails
_ = log.Log.Level(log.FATAL).Log("stacktrace", debug.Stack(), "msg", r)
}
}
func NewResourceEventHandlerFuncsForWorkqueue(queue workqueue.RateLimitingInterface) cache.ResourceEventHandlerFuncs {
return cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
key, err := KeyFunc(obj)
if err == nil {
queue.Add(key)
}
},
UpdateFunc: func(old interface{}, new interface{}) {
key, err := KeyFunc(new)
if err == nil {
queue.Add(key)
}
},
DeleteFunc: func(obj interface{}) {
key, err := KeyFunc(obj)
if err == nil {
queue.Add(key)
}
},
}
}
func MigrationKey(migration *v1.VirtualMachineInstanceMigration) string {
return fmt.Sprintf("%v/%v", migration.ObjectMeta.Namespace, migration.ObjectMeta.Name)
}
func VirtualMachineInstanceKey(vmi *v1.VirtualMachineInstance) string {
return fmt.Sprintf("%v/%v", vmi.ObjectMeta.Namespace, vmi.ObjectMeta.Name)
}
func VirtualMachineKey(vm *v1.VirtualMachine) string {
return fmt.Sprintf("%v/%v", vm.ObjectMeta.Namespace, vm.ObjectMeta.Name)
}
func PodKey(pod *k8sv1.Pod) string {
return fmt.Sprintf("%v/%v", pod.Namespace, pod.Name)
}
func DataVolumeKey(dataVolume *cdiv1.DataVolume) string {
return fmt.Sprintf("%v/%v", dataVolume.Namespace, dataVolume.Name)
}
func VirtualMachineInstanceKeys(vmis []*v1.VirtualMachineInstance) []string {
keys := []string{}
for _, vmi := range vmis {
keys = append(keys, VirtualMachineInstanceKey(vmi))
}
return keys
}
func VirtualMachineKeys(vms []*v1.VirtualMachine) []string {
keys := []string{}
for _, vm := range vms {
keys = append(keys, VirtualMachineKey(vm))
}
return keys
}
func HasFinalizer(object metav1.Object, finalizer string) bool {
for _, f := range object.GetFinalizers() {
if f == finalizer {
return true
}
}
return false
}
func RemoveFinalizer(object metav1.Object, finalizer string) {
filtered := []string{}
for _, f := range object.GetFinalizers() {
if f != finalizer {
filtered = append(filtered, f)
}
}
object.SetFinalizers(filtered)
}
func AddFinalizer(object metav1.Object, finalizer string) {
if HasFinalizer(object, finalizer) {
return
}
object.SetFinalizers(append(object.GetFinalizers(), finalizer))
}
func ObservedLatestApiVersionAnnotation(object metav1.Object) bool {
annotations := object.GetAnnotations()
if annotations == nil {
return false
}
version, ok := annotations[v1.ControllerAPILatestVersionObservedAnnotation]
if !ok || version != v1.ApiLatestVersion {
return false
}
return true
}
func SetLatestApiVersionAnnotation(object metav1.Object) {
annotations := object.GetAnnotations()
if annotations == nil {
annotations = make(map[string]string)
}
annotations[v1.ControllerAPILatestVersionObservedAnnotation] = v1.ApiLatestVersion
annotations[v1.ControllerAPIStorageVersionObservedAnnotation] = v1.ApiStorageVersion
object.SetAnnotations(annotations)
}
func ApplyVolumeRequestOnVMISpec(vmiSpec *v1.VirtualMachineInstanceSpec, request *v1.VirtualMachineVolumeRequest) *v1.VirtualMachineInstanceSpec {
if request.AddVolumeOptions != nil {
alreadyAdded := false
for _, volume := range vmiSpec.Volumes {
if volume.Name == request.AddVolumeOptions.Name {
alreadyAdded = true
break
}
}
if !alreadyAdded {
newVolume := v1.Volume{
Name: request.AddVolumeOptions.Name,
}
if request.AddVolumeOptions.VolumeSource.PersistentVolumeClaim != nil {
pvcSource := request.AddVolumeOptions.VolumeSource.PersistentVolumeClaim.DeepCopy()
pvcSource.Hotpluggable = true
newVolume.VolumeSource.PersistentVolumeClaim = pvcSource
} else if request.AddVolumeOptions.VolumeSource.DataVolume != nil {
dvSource := request.AddVolumeOptions.VolumeSource.DataVolume.DeepCopy()
dvSource.Hotpluggable = true
newVolume.VolumeSource.DataVolume = dvSource
}
vmiSpec.Volumes = append(vmiSpec.Volumes, newVolume)
if request.AddVolumeOptions.Disk != nil {
newDisk := request.AddVolumeOptions.Disk.DeepCopy()
newDisk.Name = request.AddVolumeOptions.Name
vmiSpec.Domain.Devices.Disks = append(vmiSpec.Domain.Devices.Disks, *newDisk)
}
}
} else if request.RemoveVolumeOptions != nil {
newVolumesList := []v1.Volume{}
newDisksList := []v1.Disk{}
for _, volume := range vmiSpec.Volumes {
if volume.Name != request.RemoveVolumeOptions.Name {
newVolumesList = append(newVolumesList, volume)
}
}
for _, disk := range vmiSpec.Domain.Devices.Disks {
if disk.Name != request.RemoveVolumeOptions.Name {
newDisksList = append(newDisksList, disk)
}
}
vmiSpec.Volumes = newVolumesList
vmiSpec.Domain.Devices.Disks = newDisksList
}
return vmiSpec
}
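// A sketch of an add request with illustrative names; the PVC source is
// marked hotpluggable and both the volume and its disk are appended:
//
//	spec = ApplyVolumeRequestOnVMISpec(spec, &v1.VirtualMachineVolumeRequest{
//		AddVolumeOptions: &v1.AddVolumeOptions{
//			Name: "data",
//			Disk: &v1.Disk{Name: "data"},
//			VolumeSource: &v1.HotplugVolumeSource{
//				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{},
//			},
//		},
//	})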
func CurrentVMIPod(vmi *v1.VirtualMachineInstance, podIndexer cache.Indexer) (*k8sv1.Pod, error) {
// current pod is the most recent pod created on the current VMI node
// OR the most recent pod created if no VMI node is set.
// Get all pods from the namespace
objs, err := podIndexer.ByIndex(cache.NamespaceIndex, vmi.Namespace)
if err != nil {
return nil, err
}
pods := []*k8sv1.Pod{}
for _, obj := range objs {
pod := obj.(*k8sv1.Pod)
pods = append(pods, pod)
}
var curPod *k8sv1.Pod
for _, pod := range pods {
if !metav1.IsControlledBy(pod, vmi) {
continue
}
if vmi.Status.NodeName != "" &&
vmi.Status.NodeName != pod.Spec.NodeName {
// This pod isn't scheduled to the current node.
// This can occur during the initial migration phases when
// a new target node is being prepared for the VMI.
continue
}
if curPod == nil || curPod.CreationTimestamp.Before(&pod.CreationTimestamp) {
curPod = pod
}
}
return curPod, nil
}
func VMIActivePodsCount(vmi *v1.VirtualMachineInstance, vmiPodIndexer cache.Indexer) int {
objs, err := vmiPodIndexer.ByIndex(cache.NamespaceIndex, vmi.Namespace)
if err != nil {
return 0
}
running := 0
for _, obj := range objs {
pod := obj.(*k8sv1.Pod)
if pod.Status.Phase == k8sv1.PodSucceeded || pod.Status.Phase == k8sv1.PodFailed {
// not interested in terminated pods
continue
} else if !metav1.IsControlledBy(pod, vmi) {
// not interested in pods not associated with the vmi
continue
}
running++
}
return running
}
func GeneratePatchBytes(ops []string) []byte {
return []byte(fmt.Sprintf("[%s]", strings.Join(ops, ", ")))
}
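// For example (sketch), two pre-serialized operations become one JSON patch:
//
//	GeneratePatchBytes([]string{
//		`{"op":"test","path":"/a","value":1}`,
//		`{"op":"replace","path":"/a","value":2}`,
//	})
//	// -> []byte(`[{"op":"test","path":"/a","value":1}, {"op":"replace","path":"/a","value":2}]`)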
func SetVMIPhaseTransitionTimestamp(oldStatus *v1.VirtualMachineInstanceStatus, newStatus *v1.VirtualMachineInstanceStatus) {
if oldStatus.Phase != newStatus.Phase {
for _, transitionTimeStamp := range newStatus.PhaseTransitionTimestamps {
if transitionTimeStamp.Phase == newStatus.Phase {
// already exists.
return
}
}
now := metav1.NewTime(time.Now())
newStatus.PhaseTransitionTimestamps = append(newStatus.PhaseTransitionTimestamps, v1.VirtualMachineInstancePhaseTransitionTimestamp{
Phase: newStatus.Phase,
PhaseTransitionTimestamp: now,
})
}
}
func SetVMIMigrationPhaseTransitionTimestamp(oldVMIMigration *v1.VirtualMachineInstanceMigration, newVMIMigration *v1.VirtualMachineInstanceMigration) {
if oldVMIMigration.Status.Phase != newVMIMigration.Status.Phase {
for _, transitionTimeStamp := range newVMIMigration.Status.PhaseTransitionTimestamps {
if transitionTimeStamp.Phase == newVMIMigration.Status.Phase {
// already exists.
return
}
}
now := metav1.NewTime(time.Now())
newVMIMigration.Status.PhaseTransitionTimestamps = append(newVMIMigration.Status.PhaseTransitionTimestamps, v1.VirtualMachineInstanceMigrationPhaseTransitionTimestamp{
Phase: newVMIMigration.Status.Phase,
PhaseTransitionTimestamp: now,
})
}
}
func SetSourcePod(migration *v1.VirtualMachineInstanceMigration, vmi *v1.VirtualMachineInstance, podIndexer cache.Indexer) {
if migration.Status.Phase != v1.MigrationPending {
return
}
sourcePod, err := CurrentVMIPod(vmi, podIndexer)
if err != nil {
log.Log.Object(vmi).Reason(err).Warning("migration source pod not found")
}
if sourcePod != nil {
if migration.Status.MigrationState == nil {
migration.Status.MigrationState = &v1.VirtualMachineInstanceMigrationState{}
}
migration.Status.MigrationState.SourcePod = sourcePod.Name
}
}
func VMIHasHotplugVolumes(vmi *v1.VirtualMachineInstance) bool {
for _, volumeStatus := range vmi.Status.VolumeStatus {
if volumeStatus.HotplugVolume != nil {
return true
}
}
for _, volume := range vmi.Spec.Volumes {
if volume.DataVolume != nil && volume.DataVolume.Hotpluggable {
return true
}
if volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.Hotpluggable {
return true
}
}
return false
}
func VMIHasUtilityVolumes(vmi *v1.VirtualMachineInstance) bool {
return len(vmi.Spec.UtilityVolumes) > 0
}
func vmiHasCondition(vmi *v1.VirtualMachineInstance, conditionType v1.VirtualMachineInstanceConditionType) bool {
vmiConditionManager := NewVirtualMachineInstanceConditionManager()
return vmiConditionManager.HasCondition(vmi, conditionType)
}
func VMIHasHotplugCPU(vmi *v1.VirtualMachineInstance) bool {
return vmiHasCondition(vmi, v1.VirtualMachineInstanceVCPUChange)
}
func VMIHasHotplugMemory(vmi *v1.VirtualMachineInstance) bool {
return vmiHasCondition(vmi, v1.VirtualMachineInstanceMemoryChange)
}
func AttachmentPods(ownerPod *k8sv1.Pod, podIndexer cache.Indexer) ([]*k8sv1.Pod, error) {
objs, err := podIndexer.ByIndex(cache.NamespaceIndex, ownerPod.Namespace)
if err != nil {
return nil, err
}
attachmentPods := []*k8sv1.Pod{}
for _, obj := range objs {
pod := obj.(*k8sv1.Pod)
if !metav1.IsControlledBy(pod, ownerPod) {
continue
}
attachmentPods = append(attachmentPods, pod)
}
return attachmentPods, nil
}
// IsPodReady treats the pod as ready to be handed over to virt-handler as soon
// as all containers except the compute container are ready.
func IsPodReady(pod *k8sv1.Pod) bool {
if IsPodDownOrGoingDown(pod) {
return false
}
for _, containerStatus := range pod.Status.ContainerStatuses {
// The compute container potentially holds a readiness probe for the VMI. Therefore
// don't wait for the compute container to become ready (the VMI later on will trigger the change to ready)
// and only check that the container started
if containerStatus.Name == "compute" {
if containerStatus.State.Running == nil {
return false
}
} else if containerStatus.Name == "istio-proxy" {
// When using istio the istio-proxy container will not be ready
// until there is a service pointing to this pod.
// We need to start the VM anyway
if containerStatus.State.Running == nil {
return false
}
} else if !containerStatus.Ready {
return false
}
}
return pod.Status.Phase == k8sv1.PodRunning
}
func IsPodDownOrGoingDown(pod *k8sv1.Pod) bool {
return PodIsDown(pod) || isComputeContainerDown(pod) || pod.DeletionTimestamp != nil
}
func IsPodFailedOrGoingDown(pod *k8sv1.Pod) bool {
return isPodFailed(pod) || isComputeContainerFailed(pod) || pod.DeletionTimestamp != nil
}
func isComputeContainerDown(pod *k8sv1.Pod) bool {
for _, containerStatus := range pod.Status.ContainerStatuses {
if containerStatus.Name == "compute" {
return containerStatus.State.Terminated != nil
}
}
return false
}
func isComputeContainerFailed(pod *k8sv1.Pod) bool {
for _, containerStatus := range pod.Status.ContainerStatuses {
if containerStatus.Name == "compute" {
return containerStatus.State.Terminated != nil && containerStatus.State.Terminated.ExitCode != 0
}
}
return false
}
func PodIsDown(pod *k8sv1.Pod) bool {
return pod.Status.Phase == k8sv1.PodSucceeded || pod.Status.Phase == k8sv1.PodFailed
}
func isPodFailed(pod *k8sv1.Pod) bool {
return pod.Status.Phase == k8sv1.PodFailed
}
func PodExists(pod *k8sv1.Pod) bool {
return pod != nil
}
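// GetHotplugVolumes returns the VMI volumes backed by a DataVolume, PVC or
// memory dump that are not present in the virt-launcher pod's volumes,
// i.e. the volumes that still have to be hot plugged.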
func GetHotplugVolumes(vmi *v1.VirtualMachineInstance, virtlauncherPod *k8sv1.Pod) []*v1.Volume {
hotplugVolumes := make([]*v1.Volume, 0)
podVolumes := virtlauncherPod.Spec.Volumes
vmiVolumes := vmi.Spec.Volumes
podVolumeMap := make(map[string]k8sv1.Volume)
for _, podVolume := range podVolumes {
podVolumeMap[podVolume.Name] = podVolume
}
for _, vmiVolume := range vmiVolumes {
if _, ok := podVolumeMap[vmiVolume.Name]; !ok && (vmiVolume.DataVolume != nil || vmiVolume.PersistentVolumeClaim != nil || vmiVolume.MemoryDump != nil) {
hotplugVolumes = append(hotplugVolumes, vmiVolume.DeepCopy())
}
}
return hotplugVolumes
}
func SyncPodAnnotations(clientset kubecli.KubevirtClient, pod *k8sv1.Pod, newAnnotations map[string]string) (*k8sv1.Pod, error) {
patchSet := patch.New()
for key, newValue := range newAnnotations {
if podAnnotationValue, keyExist := pod.Annotations[key]; !keyExist || podAnnotationValue != newValue {
patchSet.AddOption(
patch.WithAdd(fmt.Sprintf("/metadata/annotations/%s", patch.EscapeJSONPointer(key)), newValue),
)
}
}
if patchSet.IsEmpty() {
return pod, nil
}
patchBytes, err := patchSet.GeneratePayload()
if err != nil {
return pod, fmt.Errorf("failed to generate patch payload: %w", err)
}
patchedPod, err := clientset.CoreV1().Pods(pod.Namespace).Patch(context.Background(), pod.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
log.Log.Object(pod).Errorf("failed to sync pod annotations: %v", err)
return nil, err
}
return patchedPod, nil
}
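// A sketch of the JSON patch the function above generates for one missing
// annotation key "kubevirt.io/example" (illustrative value):
//
//	[{"op":"add","path":"/metadata/annotations/kubevirt.io~1example","value":"v"}]
//
// Slashes in keys are escaped as "~1" per JSON Pointer (RFC 6901) by
// patch.EscapeJSONPointer.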
/*
Copyright 2016 The Kubernetes Authors.
Copyright 2017 The KubeVirt Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Taken from https://github.com/kubernetes/kubernetes/blob/b28a83a4cf779189d72a87e847441888e7918e5d/pkg/controller/controller_ref_manager.go
and adapted for KubeVirt.
*/
package controller
import (
"context"
"encoding/json"
"fmt"
"sync"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
poolv1 "kubevirt.io/api/pool/v1alpha1"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)
type BaseControllerRefManager struct {
Controller metav1.Object
Selector labels.Selector
canAdoptErr error
canAdoptOnce sync.Once
CanAdoptFunc func() error
}
func (m *BaseControllerRefManager) CanAdopt() error {
m.canAdoptOnce.Do(func() {
if m.CanAdoptFunc != nil {
m.canAdoptErr = m.CanAdoptFunc()
}
})
return m.canAdoptErr
}
func (m *BaseControllerRefManager) isOwned(obj metav1.Object) bool {
controllerRef := metav1.GetControllerOf(obj)
if controllerRef == nil {
// no ownership
return false
}
if controllerRef.UID != m.Controller.GetUID() {
// Owned by someone else.
return false
}
return true
}
func (m *BaseControllerRefManager) isOwnedByOther(obj metav1.Object) bool {
controllerRef := metav1.GetControllerOf(obj)
if controllerRef == nil {
// no ownership
return false
}
if controllerRef.UID != m.Controller.GetUID() {
// Owned by someone else.
return true
}
return false
}
// ReleaseDetachedObject tries to release ownership of an object held by this controller.
//
// It will reconcile the following:
// - Release owned objects if the match function returns false.
//
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The returned boolean indicates whether you now
// own the object.
//
// No reconciliation will be attempted if the controller is being deleted.
//
// Returns:
// - true if the controller maintains ownership of the object
// - false if the controller released or has no ownership of the object
// - err if the release fails
func (m *BaseControllerRefManager) ReleaseDetachedObject(obj metav1.Object, match func(metav1.Object) bool, release func(metav1.Object) error) (bool, error) {
isOwned := m.isOwned(obj)
// Remove ownership when object is owned and selector does not match.
if isOwned && !match(obj) {
// Try to release, unless we're being deleted.
if m.Controller.GetDeletionTimestamp() != nil {
return false, nil
}
if err := release(obj); err != nil {
// If the object no longer exists, ignore the error.
if errors.IsNotFound(err) {
return false, nil
}
// Either someone else released it, or there was a transient error.
// The controller should requeue and try again if it's still stale.
return false, err
}
// Successfully released.
return false, nil
}
return isOwned, nil
}
// ClaimObject tries to take ownership of an object for this controller.
//
// It will reconcile the following:
// - Adopt orphans if the match function returns true.
// - Release owned objects if the match function returns false.
//
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The returned boolean indicates whether you now
// own the object.
//
// No reconciliation will be attempted if the controller is being deleted.
func (m *BaseControllerRefManager) ClaimObject(obj metav1.Object, match func(metav1.Object) bool, adopt, release func(metav1.Object) error) (bool, error) {
owned := m.isOwned(obj)
ownedByOther := m.isOwnedByOther(obj)
matched := match(obj)
if owned && matched {
// already owned and matched.
return true, nil
} else if owned && !matched {
// owned, but the selector no longer matches, so release if possible
return m.ReleaseDetachedObject(obj, match, release)
} else if !owned && !ownedByOther && matched {
// Not owned by anyone, but matches our selector, so adopt the orphan.
if m.Controller.GetDeletionTimestamp() != nil {
// Ignore if we're being deleted.
return false, nil
}
if obj.GetDeletionTimestamp() != nil {
// Ignore if the object is being deleted
return false, nil
}
// Selector matches. Try to adopt.
if err := adopt(obj); err != nil {
// If the object no longer exists, ignore the error.
if errors.IsNotFound(err) {
return false, nil
}
// Either someone else claimed it first, or there was a transient error.
// The controller should requeue and try again if it's still orphaned.
return false, err
}
// Successfully adopted.
return true, nil
} else {
// Either owned by another controller or not matching the selector; cannot be claimed.
return false, nil
}
}
type VirtualMachineControllerRefManager struct {
BaseControllerRefManager
controllerKind schema.GroupVersionKind
virtualMachineControl VirtualMachineControlInterface
}
// NewVirtualMachineControllerRefManager returns a VirtualMachineControllerRefManager that exposes
// methods to manage the controllerRef of virtual machines.
//
// The CanAdopt() function can be used to perform a potentially expensive check
// (such as a live GET from the API server) prior to the first adoption.
// It will only be called (at most once) if an adoption is actually attempted.
// If CanAdopt() returns a non-nil error, all adoptions will fail.
//
// NOTE: Once CanAdopt() is called, it will not be called again by the same
// VirtualMachineControllerRefManager instance. Create a new instance if it makes
// sense to check CanAdopt() again (e.g. in a different sync pass).
func NewVirtualMachineControllerRefManager(
virtualMachineControl VirtualMachineControlInterface,
controller metav1.Object,
selector labels.Selector,
controllerKind schema.GroupVersionKind,
canAdopt func() error,
) *VirtualMachineControllerRefManager {
return &VirtualMachineControllerRefManager{
BaseControllerRefManager: BaseControllerRefManager{
Controller: controller,
Selector: selector,
CanAdoptFunc: canAdopt,
},
controllerKind: controllerKind,
virtualMachineControl: virtualMachineControl,
}
}
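// Example (hedged sketch): a replica-set style controller would typically construct
// the manager once per sync pass. "rs", "selector", "clientset" and "fetchLatestRS"
// are placeholders; RecheckDeletionTimestamp defers the live GET until an adoption
// is actually attempted.
//
//	manager := NewVirtualMachineControllerRefManager(
//		RealVirtualMachineControl{Clientset: clientset},
//		rs,
//		selector,
//		virtv1.VirtualMachineInstanceReplicaSetGroupVersionKind,
//		RecheckDeletionTimestamp(func() (metav1.Object, error) {
//			return fetchLatestRS(rs.Namespace, rs.Name) // hypothetical fresh GET from the API server
//		}),
//	)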
// ClaimVirtualMachineInstances tries to take ownership of a list of VirtualMachineInstances.
//
// It will reconcile the following:
// - Adopt orphans if the selector matches.
// - Release owned objects if the selector no longer matches.
//
// Optional: If one or more filters are specified, a VirtualMachineInstance will only be claimed if
// all filters return true.
//
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of VirtualMachineInstances that you now own is returned.
func (m *VirtualMachineControllerRefManager) ClaimVirtualMachineInstances(vmis []*virtv1.VirtualMachineInstance, filters ...func(machine *virtv1.VirtualMachineInstance) bool) ([]*virtv1.VirtualMachineInstance, error) {
var claimed []*virtv1.VirtualMachineInstance
var errlist []error
match := func(obj metav1.Object) bool {
vmi := obj.(*virtv1.VirtualMachineInstance)
// Check the selector first so filters only run on potentially matching VirtualMachineInstances.
if !m.Selector.Matches(labels.Set(vmi.Labels)) {
return false
}
for _, filter := range filters {
if !filter(vmi) {
return false
}
}
return true
}
adopt := func(obj metav1.Object) error {
return m.AdoptVirtualMachineInstance(obj.(*virtv1.VirtualMachineInstance))
}
release := func(obj metav1.Object) error {
return m.ReleaseVirtualMachineInstance(obj.(*virtv1.VirtualMachineInstance))
}
for _, vmi := range vmis {
ok, err := m.ClaimObject(vmi, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
}
if ok {
claimed = append(claimed, vmi)
}
}
return claimed, utilerrors.NewAggregate(errlist)
}
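// Example (hedged sketch): filters narrow the claim to VMIs the controller cares
// about; here only VMIs that are not in a final phase would be claimed. "manager"
// and "vmis" are placeholders from the surrounding sync code.
//
//	claimed, err := manager.ClaimVirtualMachineInstances(vmis,
//		func(vmi *virtv1.VirtualMachineInstance) bool { return !vmi.IsFinal() },
//	)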
// ReleaseDetachedVirtualMachines releases ownership of detached VMs.
//
// It will reconcile the following:
// - Release owned objects if the selector no longer matches.
//
// The list of VMs that remain owned is returned.
func (m *VirtualMachineControllerRefManager) ReleaseDetachedVirtualMachines(vms []*virtv1.VirtualMachine, filters ...func(machine *virtv1.VirtualMachine) bool) ([]*virtv1.VirtualMachine, error) {
var owned []*virtv1.VirtualMachine
var errlist []error
match := func(obj metav1.Object) bool {
vm := obj.(*virtv1.VirtualMachine)
// Check selector first so filters only run on potentially matching VirtualMachines.
if !m.Selector.Matches(labels.Set(vm.Labels)) {
return false
}
for _, filter := range filters {
if !filter(vm) {
return false
}
}
return true
}
release := func(obj metav1.Object) error {
return m.ReleaseVirtualMachine(obj.(*virtv1.VirtualMachine))
}
for _, vm := range vms {
isOwner, err := m.ReleaseDetachedObject(vm, match, release)
if err != nil {
errlist = append(errlist, err)
continue
}
if isOwner {
owned = append(owned, vm)
}
}
return owned, utilerrors.NewAggregate(errlist)
}
// ClaimMatchedDataVolumes tries to take ownership of a list of DataVolumes.
//
// It will reconcile the following:
// - Adopt orphans if the selector matches.
// - Release owned objects if the selector no longer matches.
//
// Optional: If one or more filters are specified, a DataVolume will only be claimed if
// all filters return true.
//
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of DataVolumes that you now own is returned.
func (m *VirtualMachineControllerRefManager) ClaimMatchedDataVolumes(dataVolumes []*cdiv1.DataVolume) ([]*cdiv1.DataVolume, error) {
var claimed []*cdiv1.DataVolume
var errlist []error
match := func(obj metav1.Object) bool {
return true
}
adopt := func(obj metav1.Object) error {
return m.AdoptDataVolume(obj.(*cdiv1.DataVolume))
}
release := func(obj metav1.Object) error {
return m.ReleaseDataVolume(obj.(*cdiv1.DataVolume))
}
for _, dataVolume := range dataVolumes {
ok, err := m.ClaimObject(dataVolume, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
}
if ok {
claimed = append(claimed, dataVolume)
}
}
return claimed, utilerrors.NewAggregate(errlist)
}
// ClaimVirtualMachineInstanceByName tries to take ownership of a VirtualMachineInstance.
//
// It will reconcile the following:
// - Adopt the orphan if its name matches the controller's name.
// - Release the owned object if the name no longer matches.
//
// Optional: If one or more filters are specified, the VirtualMachineInstance will only be claimed if
// all filters return true.
//
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The VirtualMachineInstance is returned if you now own it, nil otherwise.
func (m *VirtualMachineControllerRefManager) ClaimVirtualMachineInstanceByName(vmi *virtv1.VirtualMachineInstance, filters ...func(machine *virtv1.VirtualMachineInstance) bool) (*virtv1.VirtualMachineInstance, error) {
match := func(obj metav1.Object) bool {
vmi := obj.(*virtv1.VirtualMachineInstance)
// Check the name first so filters only run on a potentially matching VirtualMachineInstance.
if m.Controller.GetName() != vmi.Name {
return false
}
for _, filter := range filters {
if !filter(vmi) {
return false
}
}
return true
}
adopt := func(obj metav1.Object) error {
return m.AdoptVirtualMachineInstance(obj.(*virtv1.VirtualMachineInstance))
}
release := func(obj metav1.Object) error {
return m.ReleaseVirtualMachineInstance(obj.(*virtv1.VirtualMachineInstance))
}
ok, err := m.ClaimObject(vmi, match, adopt, release)
if err != nil {
return nil, err
}
if ok {
return vmi, nil
}
return nil, nil
}
// AdoptVirtualMachineInstance sends a patch to take control of the vmi. It returns the error if
// the patching fails.
func (m *VirtualMachineControllerRefManager) AdoptVirtualMachineInstance(vmi *virtv1.VirtualMachineInstance) error {
if err := m.CanAdopt(); err != nil {
return fmt.Errorf("can't adopt VirtualMachineInstance %v/%v (%v): %v", vmi.Namespace, vmi.Name, vmi.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
// OwnerReference exists with controller=true.
addControllerPatch := fmt.Sprintf(
`{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`,
m.controllerKind.GroupVersion(), m.controllerKind.Kind,
m.Controller.GetName(), m.Controller.GetUID(), vmi.UID)
return m.virtualMachineControl.PatchVirtualMachineInstance(vmi.Namespace, vmi.Name, []byte(addControllerPatch))
}
// ReleaseVirtualMachineInstance sends a patch to free the virtual machine from the control of the controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *VirtualMachineControllerRefManager) ReleaseVirtualMachineInstance(vmi *virtv1.VirtualMachineInstance) error {
log.Log.V(2).Object(vmi).Infof("patching vmi to remove its controllerRef to %s/%s:%s",
m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
// TODO CRDs don't support strategic merge, therefore replace the ownerReferences list with a merge patch
deleteOwnerRefPatch := `{"metadata":{"ownerReferences":[]}}`
err := m.virtualMachineControl.PatchVirtualMachineInstance(vmi.Namespace, vmi.Name, []byte(deleteOwnerRefPatch))
if err != nil {
if errors.IsNotFound(err) {
// If the vmi no longer exists, ignore it.
return nil
}
if errors.IsInvalid(err) {
// Invalid error will be returned in two cases: 1. the vmi
// has no owner reference, 2. the uid of the vmi doesn't
// match, which means the vmi is deleted and then recreated.
// In both cases, the error can be ignored.
// TODO: If the vmi has owner references, but none of them
// has the owner.UID, server will silently ignore the patch.
// Investigate why.
return nil
}
}
return err
}
// AdoptVirtualMachine sends a patch to take control of the vm. It returns the error if
// the patching fails.
func (m *VirtualMachineControllerRefManager) AdoptVirtualMachine(vm *virtv1.VirtualMachine) error {
if err := m.CanAdopt(); err != nil {
return fmt.Errorf("can't adopt VirtualMachine %v/%v (%v): %v", vm.Namespace, vm.Name, vm.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
// OwnerReference exists with controller=true.
addControllerPatch := fmt.Sprintf(
`{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`,
m.controllerKind.GroupVersion(), m.controllerKind.Kind,
m.Controller.GetName(), m.Controller.GetUID(), vm.UID)
return m.virtualMachineControl.PatchVirtualMachine(vm.Namespace, vm.Name, []byte(addControllerPatch))
}
// ReleaseVirtualMachine sends a patch to free the virtual machine from the control of the controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *VirtualMachineControllerRefManager) ReleaseVirtualMachine(vm *virtv1.VirtualMachine) error {
log.Log.V(2).Object(vm).Infof("patching vm to remove its controllerRef to %s/%s:%s",
m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
// When the VM is owned by a pool and is being released, the pool finalizer has to be
// removed as well; otherwise it would stay behind until a user manually removes it.
newFinalizers := make([]string, 0, len(vm.Finalizers))
for _, fin := range vm.Finalizers {
if fin != poolv1.VirtualMachinePoolControllerFinalizer {
newFinalizers = append(newFinalizers, fin)
}
}
// TODO CRDs don't support strategic merge, therefore replace the ownerReferences list with a merge patch
// Marshal the remaining finalizers so the patch body is valid JSON; formatting the
// slice with %s directly would not produce properly quoted JSON strings.
finalizers, err := json.Marshal(newFinalizers)
if err != nil {
return err
}
releaseVMPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[],"finalizers":%s}}`, string(finalizers))
err = m.virtualMachineControl.PatchVirtualMachine(vm.Namespace, vm.Name, []byte(releaseVMPatch))
if err != nil {
if errors.IsNotFound(err) {
// If the vm no longer exists, ignore it.
return nil
}
if errors.IsInvalid(err) {
// Invalid error will be returned in two cases: 1. the vm
// has no owner reference, 2. the uid of the vm doesn't
// match, which means the vm is deleted and then recreated.
// In both cases, the error can be ignored.
// TODO: If the vm has owner references, but none of them
// has the owner.UID, server will silently ignore the patch.
// Investigate why.
return nil
}
}
return err
}
// AdoptDataVolume sends a patch to take control of the dataVolume. It returns the error if
// the patching fails.
func (m *VirtualMachineControllerRefManager) AdoptDataVolume(dataVolume *cdiv1.DataVolume) error {
if err := m.CanAdopt(); err != nil {
return fmt.Errorf("can't adopt DataVolume %v/%v (%v): %v", dataVolume.Namespace, dataVolume.Name, dataVolume.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
// OwnerReference exists with controller=true.
addControllerPatch := fmt.Sprintf(
`{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`,
m.controllerKind.GroupVersion(), m.controllerKind.Kind,
m.Controller.GetName(), m.Controller.GetUID(), dataVolume.UID)
return m.virtualMachineControl.PatchDataVolume(dataVolume.Namespace, dataVolume.Name, []byte(addControllerPatch))
}
// ReleaseDataVolume sends a patch to free the dataVolume from the control of the controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *VirtualMachineControllerRefManager) ReleaseDataVolume(dataVolume *cdiv1.DataVolume) error {
log.Log.V(2).Object(dataVolume).Infof("patching dataVolume to remove its controllerRef to %s/%s:%s",
m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
// TODO CRDs don't support strategic merge, therefore replace the ownerReferences list with a merge patch
deleteOwnerRefPatch := `{"metadata":{"ownerReferences":[]}}`
err := m.virtualMachineControl.PatchDataVolume(dataVolume.Namespace, dataVolume.Name, []byte(deleteOwnerRefPatch))
if err != nil {
if errors.IsNotFound(err) {
// If the dataVolume no longer exists, ignore it.
return nil
}
if errors.IsInvalid(err) {
// Invalid error will be returned in two cases: 1. the dataVolume
// has no owner reference, 2. the uid of the dataVolume doesn't
// match, which means the dataVolume is deleted and then recreated.
// In both cases, the error can be ignored.
// TODO: If the dataVolume has owner references, but none of them
// has the owner.UID, server will silently ignore the patch.
// Investigate why.
return nil
}
}
return err
}
type VirtualMachineControlInterface interface {
PatchVirtualMachine(namespace, name string, data []byte) error
PatchVirtualMachineInstance(namespace, name string, data []byte) error
PatchDataVolume(namespace, name string, data []byte) error
}
type RealVirtualMachineControl struct {
Clientset kubecli.KubevirtClient
}
func (r RealVirtualMachineControl) PatchVirtualMachineInstance(namespace, name string, data []byte) error {
// TODO should be a strategic merge patch, but not possible until https://github.com/kubernetes/kubernetes/issues/56348 is resolved
_, err := r.Clientset.VirtualMachineInstance(namespace).Patch(context.Background(), name, types.MergePatchType, data, metav1.PatchOptions{})
return err
}
func (r RealVirtualMachineControl) PatchVirtualMachine(namespace, name string, data []byte) error {
// TODO should be a strategic merge patch, but not possible until https://github.com/kubernetes/kubernetes/issues/56348 is resolved
_, err := r.Clientset.VirtualMachine(namespace).Patch(context.Background(), name, types.MergePatchType, data, metav1.PatchOptions{})
return err
}
func (r RealVirtualMachineControl) PatchDataVolume(namespace, name string, data []byte) error {
// TODO should be a strategic merge patch, but not possible until https://github.com/kubernetes/kubernetes/issues/56348 is resolved
_, err := r.Clientset.CdiClient().CdiV1beta1().DataVolumes(namespace).Patch(context.Background(), name, types.MergePatchType, data, metav1.PatchOptions{})
return err
}
// RecheckDeletionTimestamp returns a CanAdopt() function to recheck deletion.
//
// The CanAdopt() function calls getObject() to fetch the latest value,
// and denies adoption attempts if that object has a non-nil DeletionTimestamp.
func RecheckDeletionTimestamp(getObject func() (metav1.Object, error)) func() error {
return func() error {
obj, err := getObject()
if err != nil {
return fmt.Errorf("can't recheck DeletionTimestamp: %v", err)
}
if obj.GetDeletionTimestamp() != nil {
return fmt.Errorf("%v/%v has just been deleted at %v", obj.GetNamespace(), obj.GetName(), obj.GetDeletionTimestamp())
}
return nil
}
}
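// Example (hedged sketch): because CanAdopt is evaluated lazily (via sync.Once in
// BaseControllerRefManager), the live GET below only happens if an adoption is
// actually attempted; "getLatestController" is a placeholder for a fresh read of
// the owning controller from the API server.
//
//	canAdopt := RecheckDeletionTimestamp(func() (metav1.Object, error) {
//		return getLatestController()
//	})
//	// pass canAdopt as the CanAdoptFunc of a controller ref manager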
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/* TODO:
Taken from https://github.com/kubernetes/kubernetes/blob/1889a6ef52eb18b08e24843577c5b9d3b9a65daa/pkg/controller/controller_utils.go
As soon as expectations become available in client-go or apimachinery, delete this and switch.
*/
package controller
import (
"fmt"
"sync"
"sync/atomic"
"time"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
"k8s.io/utils/clock"
"kubevirt.io/client-go/log"
)
const (
// If a watch drops a delete event for a pod, it'll take this long
// before a dormant controller waiting for those packets is woken up anyway. It is
// specifically targeted at the case where some problem prevents an update
// of expectations, without it the controller could stay asleep forever. This should
// be set based on the expected latency of watch events.
//
// Currently a controller can service (create *and* observe the watch events for said
// creation) about 10 pods a second, so it takes about 1 min to service
// 500 pods. Just creation is limited to 20qps, and watching happens with ~10-30s
// latency/pod at the scale of 3000 pods over 100 nodes.
ExpectationsTimeout = 5 * time.Minute
)
// Expectations are a way for controllers to tell the controller manager what they expect. e.g.:
// ControllerExpectations: {
// controller1: expects 2 adds in 2 minutes
// controller2: expects 2 dels in 2 minutes
// controller3: expects -1 adds in 2 minutes => controller3's expectations have already been met
// }
//
// Implementation:
// ControlleeExpectation = pair of atomic counters to track controllee's creation/deletion
// ControllerExpectationsStore = TTLStore + a ControlleeExpectation per controller
//
// * Once set, expectations can only be lowered
// * A controller isn't synced until its expectations are either fulfilled or expired
// * Controllers that don't set expectations will get woken up for every matching controllee
// ExpKeyFunc parses the key out of a ControlleeExpectations object.
var ExpKeyFunc = func(obj interface{}) (string, error) {
if e, ok := obj.(*ControlleeExpectations); ok {
return e.key, nil
}
return "", fmt.Errorf("Could not find key for obj %#v", obj)
}
// ControllerExpectationsInterface is an interface that allows users to set and wait on expectations.
// Only abstracted out for testing.
// Warning: if using KeyFunc it is not safe to use a single ControllerExpectationsInterface with different
// types of controllers, because the keys might conflict across types.
type ControllerExpectationsInterface interface {
GetExpectations(controllerKey string) (*ControlleeExpectations, bool, error)
SatisfiedExpectations(controllerKey string) bool
DeleteExpectations(controllerKey string)
SetExpectations(controllerKey string, add, del int)
ExpectCreations(controllerKey string, adds int)
ExpectDeletions(controllerKey string, dels int)
CreationObserved(controllerKey string)
DeletionObserved(controllerKey string)
RaiseExpectations(controllerKey string, add, del int)
LowerExpectations(controllerKey string, add, del int)
AllPendingCreations() (creations int64)
}
// ControllerExpectations is a cache mapping controllers to what they expect to see before being woken up for a sync.
type ControllerExpectations struct {
cache.Store
// A name identifying this set of expectations, used in log messages
name string
}
// GetExpectations returns the ControlleeExpectations of the given controller.
func (r *ControllerExpectations) GetExpectations(controllerKey string) (*ControlleeExpectations, bool, error) {
if exp, exists, err := r.GetByKey(controllerKey); err == nil && exists {
return exp.(*ControlleeExpectations), true, nil
} else {
return nil, false, err
}
}
// DeleteExpectations deletes the expectations of the given controller from the TTLStore.
func (r *ControllerExpectations) DeleteExpectations(controllerKey string) {
if exp, exists, err := r.GetByKey(controllerKey); err == nil && exists {
if err := r.Delete(exp); err != nil {
log.Log.Infof("Error deleting expectations for controller %v: %v", controllerKey, err)
}
}
}
// SatisfiedExpectations returns true if the required adds/dels for the given controller have been observed.
// Add/del counts are established by the controller at sync time, and updated as controllees are observed by the controller
// manager.
func (r *ControllerExpectations) SatisfiedExpectations(controllerKey string) bool {
if exp, exists, err := r.GetExpectations(controllerKey); exists {
if exp.Fulfilled() {
log.Log.V(4).Infof("Controller expectations (name: %s) fulfilled %#v", r.name, exp)
return true
} else if exp.isExpired() {
log.Log.V(4).Infof("Controller expectations (name: %s) expired %#v", r.name, exp)
return true
} else {
log.Log.V(4).Infof("Controller (name: %s) still waiting on expectations %#v", r.name, exp)
return false
}
} else if err != nil {
log.Log.Infof("Error encountered while checking expectations (name: %s) %#v, forcing sync", r.name, err)
} else {
// When a new controller is created, it doesn't have expectations.
// When it doesn't see expected watch events for > TTL, the expectations expire.
// - In this case it wakes up, creates/deletes controllees, and sets expectations again.
// When it has satisfied expectations and no controllees need to be created/destroyed > TTL, the expectations expire.
// - In this case it continues without setting expectations till it needs to create/delete controllees.
log.Log.V(4).Infof("Controller %v (name: %s) either never recorded expectations, or the ttl expired.", controllerKey, r.name)
}
// Trigger a sync if we either encountered an error (which shouldn't happen since we're
// getting from the local store) or this controller hasn't established expectations.
return true
}
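// Example (hedged sketch): the canonical pattern around expectations in a sync loop.
// "expectations", "key" and "createVMI" are placeholders for the controller's state.
//
//	if expectations.SatisfiedExpectations(key) {
//		expectations.ExpectCreations(key, 3)
//		for i := 0; i < 3; i++ {
//			if err := createVMI(); err != nil {
//				// the watch event will never arrive, so lower the expectation manually
//				expectations.CreationObserved(key)
//			}
//		}
//	}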
// TODO: Extend ExpirationCache to support explicit expiration.
// TODO: Make this possible to disable in tests.
// TODO: Support injection of clock.
func (exp *ControlleeExpectations) isExpired() bool {
return clock.RealClock{}.Since(exp.timestamp) > ExpectationsTimeout
}
func panicWithKeyFuncMsg(err error) {
const keyFuncChangedFormat = "KeyFunc was changed, %v"
panic(fmt.Errorf(keyFuncChangedFormat, err))
}
// SetExpectations registers new expectations for the given controller. Forgets existing expectations.
func (r *ControllerExpectations) SetExpectations(controllerKey string, add, del int) {
exp := &ControlleeExpectations{key: controllerKey, timestamp: clock.RealClock{}.Now()}
exp.add.Store(int64(add))
exp.del.Store(int64(del))
log.Log.V(4).Infof("Setting expectations %#v", exp)
if err := r.Add(exp); err != nil {
panicWithKeyFuncMsg(err)
}
}
func (r *ControllerExpectations) ExpectCreations(controllerKey string, adds int) {
r.SetExpectations(controllerKey, adds, 0)
}
func (r *ControllerExpectations) ExpectDeletions(controllerKey string, dels int) {
r.SetExpectations(controllerKey, 0, dels)
}
// LowerExpectations decrements the expectation counts of the given controller.
func (r *ControllerExpectations) LowerExpectations(controllerKey string, add, del int) {
if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists {
exp.Add(int64(-add), int64(-del))
// The expectations might've been modified since the update on the previous line.
log.Log.V(4).Infof("Lowered expectations: %s", exp)
}
}
// RaiseExpectations increments the expectation counts of the given controller.
func (r *ControllerExpectations) RaiseExpectations(controllerKey string, add, del int) {
if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists {
exp.Add(int64(add), int64(del))
// The expectations might've been modified since the update on the previous line.
log.Log.V(4).Infof("Raised expectations: %s", exp)
}
}
// CreationObserved atomically decrements the `add` expectation count of the given controller.
func (r *ControllerExpectations) CreationObserved(controllerKey string) {
r.LowerExpectations(controllerKey, 1, 0)
}
func (r *ControllerExpectations) AllPendingCreations() (sum int64) {
for _, key := range r.ListKeys() {
exp, exists, _ := r.GetExpectations(key)
if exists {
sum = sum + exp.add.Load()
}
}
return
}
// DeletionObserved atomically decrements the `del` expectation count of the given controller.
func (r *ControllerExpectations) DeletionObserved(controllerKey string) {
r.LowerExpectations(controllerKey, 0, 1)
}
// Expectations are either fulfilled, or expire naturally.
type Expectations interface {
Fulfilled() bool
}
// ControlleeExpectations track controllee creates/deletes.
type ControlleeExpectations struct {
add atomic.Int64
del atomic.Int64
key string
timestamp time.Time
}
// Add increments the add and del counters.
func (e *ControlleeExpectations) Add(add, del int64) {
e.add.Add(add)
e.del.Add(del)
}
// Fulfilled returns true if this expectation has been fulfilled.
func (e *ControlleeExpectations) Fulfilled() bool {
// TODO: think about why this line being atomic doesn't matter
return e.add.Load() <= 0 && e.del.Load() <= 0
}
// GetExpectations returns the add and del expectations of the controllee.
func (e *ControlleeExpectations) GetExpectations() (int64, int64) {
return e.add.Load(), e.del.Load()
}
// String formats the controllee expectations as a string.
func (e *ControlleeExpectations) String() string {
return fmt.Sprintf("key: %s, timestamp: %v, add: %d, del: %d", e.key, e.timestamp, e.add.Load(), e.del.Load())
}
// NewControllerExpectations returns a store for ControllerExpectations.
func NewControllerExpectations() *ControllerExpectations {
return &ControllerExpectations{Store: cache.NewStore(ExpKeyFunc), name: "n/a"}
}
func NewControllerExpectationsWithName(name string) *ControllerExpectations {
return &ControllerExpectations{Store: cache.NewStore(ExpKeyFunc), name: name}
}
// UIDSetKeyFunc parses the key out of a UIDSet.
var UIDSetKeyFunc = func(obj interface{}) (string, error) {
if u, ok := obj.(*UIDSet); ok {
return u.key, nil
}
return "", fmt.Errorf("Could not find key for obj %#v", obj)
}
// UIDSet holds a key and a set of UIDs. Used by the
// UIDTrackingControllerExpectations to remember which UID it has seen/still
// waiting for.
type UIDSet struct {
sets.String
key string
}
// UIDTrackingControllerExpectations tracks the UID of the pods it deletes.
// This cache is needed over plain old expectations to safely handle graceful
// deletion. The desired behavior is to treat an update that sets the
// DeletionTimestamp on an object as a delete. To do so consistently, one needs
// to remember the expected deletes so they aren't double counted.
// TODO: Track creates as well (#22599)
type UIDTrackingControllerExpectations struct {
ControllerExpectationsInterface
// TODO: There is a much nicer way to do this that involves a single store,
// a lock per entry, and a ControlleeExpectationsInterface type.
uidStoreLock sync.Mutex
// Store used for the UIDs associated with any expectation tracked via the
// ControllerExpectationsInterface.
uidStore cache.Store
}
// GetUIDs is a convenience method to avoid exposing the set of expected uids.
// The returned set is not thread safe; all modifications must be made while holding
// the uidStoreLock.
func (u *UIDTrackingControllerExpectations) GetUIDs(controllerKey string) sets.String {
if uid, exists, err := u.uidStore.GetByKey(controllerKey); err == nil && exists {
return uid.(*UIDSet).String
}
return nil
}
// ExpectDeletions records expectations for the given deleteKeys, against the given controller.
func (u *UIDTrackingControllerExpectations) ExpectDeletions(rcKey string, deletedKeys []string) {
u.uidStoreLock.Lock()
defer u.uidStoreLock.Unlock()
if existing := u.GetUIDs(rcKey); existing != nil && existing.Len() != 0 {
log.Log.Errorf("Clobbering existing delete keys: %+v", existing)
}
expectedUIDs := sets.NewString()
for _, k := range deletedKeys {
expectedUIDs.Insert(k)
}
log.Log.V(4).Infof("Controller %v waiting on deletions for: %+v", rcKey, deletedKeys)
if err := u.uidStore.Add(&UIDSet{expectedUIDs, rcKey}); err != nil {
panicWithKeyFuncMsg(err)
}
u.ControllerExpectationsInterface.ExpectDeletions(rcKey, expectedUIDs.Len())
}
func (u *UIDTrackingControllerExpectations) AddExpectedDeletion(rcKey string, deletedKey string) {
u.uidStoreLock.Lock()
defer u.uidStoreLock.Unlock()
expectedUIDs := sets.NewString()
if existing := u.GetUIDs(rcKey); existing != nil && existing.Len() != 0 {
expectedUIDs = existing
}
expectedUIDs.Insert(deletedKey)
log.Log.V(4).Infof("Controller %v waiting on deletions for: %+v", rcKey, expectedUIDs)
if err := u.uidStore.Add(&UIDSet{expectedUIDs, rcKey}); err != nil {
panicWithKeyFuncMsg(err)
}
u.ControllerExpectationsInterface.ExpectDeletions(rcKey, expectedUIDs.Len())
}
// DeletionObserved records the given deleteKey as a deletion, for the given rc.
func (u *UIDTrackingControllerExpectations) DeletionObserved(rcKey, deleteKey string) {
u.uidStoreLock.Lock()
defer u.uidStoreLock.Unlock()
uids := u.GetUIDs(rcKey)
if uids != nil && uids.Has(deleteKey) {
log.Log.V(4).Infof("Controller %v received delete for pod %v", rcKey, deleteKey)
u.ControllerExpectationsInterface.DeletionObserved(rcKey)
uids.Delete(deleteKey)
}
}
// DeleteExpectations deletes the UID set and invokes DeleteExpectations on the
// underlying ControllerExpectationsInterface.
func (u *UIDTrackingControllerExpectations) DeleteExpectations(rcKey string) {
u.uidStoreLock.Lock()
defer u.uidStoreLock.Unlock()
u.ControllerExpectationsInterface.DeleteExpectations(rcKey)
if uidExp, exists, err := u.uidStore.GetByKey(rcKey); err == nil && exists {
if err := u.uidStore.Delete(uidExp); err != nil {
log.Log.Infof("Error deleting uid expectations for controller %v: %v", rcKey, err)
}
}
}
// NewUIDTrackingControllerExpectations returns a wrapper around
// ControllerExpectations that is aware of deleteKeys.
func NewUIDTrackingControllerExpectations(ce ControllerExpectationsInterface) *UIDTrackingControllerExpectations {
return &UIDTrackingControllerExpectations{ControllerExpectationsInterface: ce, uidStore: cache.NewStore(UIDSetKeyFunc)}
}
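// Example (hedged sketch): graceful deletion handling. Record the keys of the pods
// being deleted, then count the update that sets the DeletionTimestamp as the
// observed delete; "rcKey" and "podKey" are placeholders.
//
//	uidExp := NewUIDTrackingControllerExpectations(NewControllerExpectations())
//	uidExp.ExpectDeletions(rcKey, []string{podKey})
//	// later, from the update/delete event handler:
//	uidExp.DeletionObserved(rcKey, podKey)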
package controller
import (
"fmt"
"k8s.io/client-go/tools/cache"
)
var (
KeyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
)
func NamespacedKey(namespace, name string) string {
return fmt.Sprintf("%s/%s", namespace, name)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package controller
import (
"errors"
"fmt"
"math/rand"
"sync"
"time"
vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
routev1 "github.com/openshift/api/route/v1"
secv1 "github.com/openshift/api/security/v1"
promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
coordinationv1 "k8s.io/api/coordination/v1"
k8sv1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
policyv1 "k8s.io/api/policy/v1"
rbacv1 "k8s.io/api/rbac/v1"
resourcev1 "k8s.io/api/resource/v1"
storagev1 "k8s.io/api/storage/v1"
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
extclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/informers"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
apiregv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
backupv1 "kubevirt.io/api/backup/v1alpha1"
clonebase "kubevirt.io/api/clone"
clone "kubevirt.io/api/clone/v1beta1"
"kubevirt.io/api/core"
kubev1 "kubevirt.io/api/core/v1"
v1 "kubevirt.io/api/core/v1"
exportv1 "kubevirt.io/api/export/v1beta1"
instancetypeapi "kubevirt.io/api/instancetype"
instancetypev1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/api/migrations"
migrationsv1 "kubevirt.io/api/migrations/v1alpha1"
poolv1 "kubevirt.io/api/pool/v1beta1"
"kubevirt.io/api/snapshot"
snapshotv1 "kubevirt.io/api/snapshot/v1beta1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
"kubevirt.io/kubevirt/pkg/testutils"
)
const (
/*
TODO: replace this assignment with an expression that accepts only kubev1.ManagedByLabelOperatorValue after a few releases (after 0.47).
The current assignment avoids an error on update
(the operator can't recognize components with the old managed-by label value).
*/
OperatorLabel = kubev1.ManagedByLabel + " in (" + kubev1.ManagedByLabelOperatorValue + "," + kubev1.ManagedByLabelOperatorOldValue + " )"
NotOperatorLabel = kubev1.ManagedByLabel + " notin (" + kubev1.ManagedByLabelOperatorValue + "," + kubev1.ManagedByLabelOperatorOldValue + " )"
)
const (
ByVMINameIndex = "byVMIName"
ByMigrationUIDIndex = "byMigrationUID"
UnfinishedIndex = "unfinished"
)
var unexpectedObjectError = errors.New("unexpected object")
type newSharedInformer func() cache.SharedIndexInformer
type KubeInformerFactory interface {
// Starts any informers that have not been started yet
// This function is thread safe and idempotent
Start(stopCh <-chan struct{})
// Waits for all informers to sync
WaitForCacheSync(stopCh <-chan struct{})
// Watches for vmi objects
VMI() cache.SharedIndexInformer
// Watches for vmi objects assigned to a specific host
VMISourceHost(hostName string) cache.SharedIndexInformer
// Watches for vmi objects assigned to a specific host
// as a migration target
VMITargetHost(hostName string) cache.SharedIndexInformer
// Watches for VirtualMachineInstanceReplicaSet objects
VMIReplicaSet() cache.SharedIndexInformer
// Watches for VirtualMachinePool objects
VMPool() cache.SharedIndexInformer
// Watches for VirtualMachineInstancePreset objects
VirtualMachinePreset() cache.SharedIndexInformer
// Watches for pods related only to kubevirt
KubeVirtPod() cache.SharedIndexInformer
// Watches for nodes
KubeVirtNode() cache.SharedIndexInformer
// Watches for VirtualMachine objects, including those whose VMIs are stopped or not running
VirtualMachine() cache.SharedIndexInformer
// Watches VirtualMachineInstanceMigration objects
VirtualMachineInstanceMigration() cache.SharedIndexInformer
// Watches VirtualMachineBackup objects
VirtualMachineBackup() cache.SharedIndexInformer
// Watches VirtualMachineBackupTracker objects
VirtualMachineBackupTracker() cache.SharedIndexInformer
// Watches VirtualMachineExport objects
VirtualMachineExport() cache.SharedIndexInformer
// Watches VirtualMachineSnapshot objects
VirtualMachineSnapshot() cache.SharedIndexInformer
// Watches VirtualMachineSnapshotContent objects
VirtualMachineSnapshotContent() cache.SharedIndexInformer
// Watches VirtualMachineRestore objects
VirtualMachineRestore() cache.SharedIndexInformer
// Watches MigrationPolicy objects
MigrationPolicy() cache.SharedIndexInformer
// Watches VirtualMachineClone objects
VirtualMachineClone() cache.SharedIndexInformer
// Watches VirtualMachineInstancetype objects
VirtualMachineInstancetype() cache.SharedIndexInformer
// Watches VirtualMachineClusterInstancetype objects
VirtualMachineClusterInstancetype() cache.SharedIndexInformer
// Watches VirtualMachinePreference objects
VirtualMachinePreference() cache.SharedIndexInformer
// Watches VirtualMachineClusterPreference objects
VirtualMachineClusterPreference() cache.SharedIndexInformer
// Watches for k8s extensions api configmap
ApiAuthConfigMap() cache.SharedIndexInformer
// Watches for the kubevirt CA config map
KubeVirtCAConfigMap() cache.SharedIndexInformer
// Watches for the kubevirt export CA config map
KubeVirtExportCAConfigMap() cache.SharedIndexInformer
// Watches for changes in kubevirt leases
Leases() cache.SharedIndexInformer
// Watches for the export route config map
ExportRouteConfigMap() cache.SharedIndexInformer
// Watches for the kubevirt export service
ExportService() cache.SharedIndexInformer
// ConfigMaps which are managed by the operator
OperatorConfigMap() cache.SharedIndexInformer
// Watches for PersistentVolumeClaim objects
PersistentVolumeClaim() cache.SharedIndexInformer
// Watches for ControllerRevision objects
ControllerRevision() cache.SharedIndexInformer
// Watches for CDI DataVolume objects
DataVolume() cache.SharedIndexInformer
// Fake CDI DataVolume informer used when feature gate is disabled
DummyDataVolume() cache.SharedIndexInformer
// Watches for CDI DataSource objects
DataSource() cache.SharedIndexInformer
// Fake CDI DataSource informer used when feature gate is disabled
DummyDataSource() cache.SharedIndexInformer
// Watches for CDI StorageProfile objects
StorageProfile() cache.SharedIndexInformer
// Fake CDI StorageProfile informer used when feature gate is disabled
DummyStorageProfile() cache.SharedIndexInformer
// Watches for CDI objects
CDI() cache.SharedIndexInformer
// Fake CDI informer used when feature gate is disabled
DummyCDI() cache.SharedIndexInformer
// Watches for CDIConfig objects
CDIConfig() cache.SharedIndexInformer
// Fake CDIConfig informer used when feature gate is disabled
DummyCDIConfig() cache.SharedIndexInformer
// CRD
CRD() cache.SharedIndexInformer
// Watches for KubeVirt objects
KubeVirt() cache.SharedIndexInformer
// Service Accounts
OperatorServiceAccount() cache.SharedIndexInformer
// ClusterRole
OperatorClusterRole() cache.SharedIndexInformer
// ClusterRoleBinding
OperatorClusterRoleBinding() cache.SharedIndexInformer
// Roles
OperatorRole() cache.SharedIndexInformer
// RoleBinding
OperatorRoleBinding() cache.SharedIndexInformer
// CRD
OperatorCRD() cache.SharedIndexInformer
// Service
OperatorService() cache.SharedIndexInformer
// DaemonSet
OperatorDaemonSet() cache.SharedIndexInformer
// Deployment
OperatorDeployment() cache.SharedIndexInformer
// SecurityContextConstraints
OperatorSCC() cache.SharedIndexInformer
// Fake SecurityContextConstraints informer used when not on openshift
DummyOperatorSCC() cache.SharedIndexInformer
// Routes
OperatorRoute() cache.SharedIndexInformer
// Fake Routes informer used when not on openshift
DummyOperatorRoute() cache.SharedIndexInformer
// Ingress
Ingress() cache.SharedIndexInformer
// ConfigMaps for operator install strategies
OperatorInstallStrategyConfigMaps() cache.SharedIndexInformer
// Jobs for dumping operator install strategies
OperatorInstallStrategyJob() cache.SharedIndexInformer
// KubeVirt infrastructure pods
OperatorPod() cache.SharedIndexInformer
// Webhooks created/managed by virt operator
OperatorValidationWebhook() cache.SharedIndexInformer
// Webhooks created/managed by virt operator
OperatorMutatingWebhook() cache.SharedIndexInformer
// APIServices created/managed by virt operator
OperatorAPIService() cache.SharedIndexInformer
// PodDisruptionBudgets created/managed by virt operator
OperatorPodDisruptionBudget() cache.SharedIndexInformer
// ServiceMonitors created/managed by virt operator
OperatorServiceMonitor() cache.SharedIndexInformer
// Managed secrets which hold data like certificates
Secrets() cache.SharedIndexInformer
// Unmanaged secrets for things like Ingress TLS
UnmanagedSecrets() cache.SharedIndexInformer
// Fake ServiceMonitor informer used when Prometheus is not installed
DummyOperatorServiceMonitor() cache.SharedIndexInformer
// ValidatingAdmissionPolicyBinding created/managed by virt operator
OperatorValidatingAdmissionPolicyBinding() cache.SharedIndexInformer
// Fake OperatorValidatingAdmissionPolicyBinding informer used when ValidatingAdmissionPolicyBinding is not installed
DummyOperatorValidatingAdmissionPolicyBinding() cache.SharedIndexInformer
// ValidatingAdmissionPolicies created/managed by virt operator
OperatorValidatingAdmissionPolicy() cache.SharedIndexInformer
// Fake OperatorValidatingAdmissionPolicy informer used when ValidatingAdmissionPolicy is not installed
DummyOperatorValidatingAdmissionPolicy() cache.SharedIndexInformer
// The namespace where KubeVirt is deployed
Namespace() cache.SharedIndexInformer
// PrometheusRules created/managed by virt operator
OperatorPrometheusRule() cache.SharedIndexInformer
// Fake PrometheusRule informer used when Prometheus is not installed
DummyOperatorPrometheusRule() cache.SharedIndexInformer
// PVC StorageClasses
StorageClass() cache.SharedIndexInformer
// Pod returns an informer for ALL Pods in the system
Pod() cache.SharedIndexInformer
ResourceQuota() cache.SharedIndexInformer
ResourceClaim() cache.SharedIndexInformer
DummyResourceClaim() cache.SharedIndexInformer
ResourceSlice() cache.SharedIndexInformer
DummyResourceSlice() cache.SharedIndexInformer
K8SInformerFactory() informers.SharedInformerFactory
}
type kubeInformerFactory struct {
restClient *rest.RESTClient
clientSet kubecli.KubevirtClient
aggregatorClient aggregatorclient.Interface
lock sync.Mutex
defaultResync time.Duration
informers map[string]cache.SharedIndexInformer
startedInformers map[string]bool
kubevirtNamespace string
k8sInformers informers.SharedInformerFactory
}
func NewKubeInformerFactory(restClient *rest.RESTClient, clientSet kubecli.KubevirtClient, aggregatorClient aggregatorclient.Interface, kubevirtNamespace string) KubeInformerFactory {
return &kubeInformerFactory{
restClient: restClient,
clientSet: clientSet,
aggregatorClient: aggregatorClient,
// Resulting resync period will be between 12 and 24 hours, like the default for k8s
defaultResync: ResyncPeriod(12 * time.Hour),
informers: make(map[string]cache.SharedIndexInformer),
startedInformers: make(map[string]bool),
kubevirtNamespace: kubevirtNamespace,
k8sInformers: informers.NewSharedInformerFactoryWithOptions(clientSet, 0),
}
}
// Start can be called from multiple controllers in different go routines safely.
// Only informers that have not started are triggered by this function.
// Multiple calls to this function are idempotent.
func (f *kubeInformerFactory) Start(stopCh <-chan struct{}) {
f.lock.Lock()
defer f.lock.Unlock()
for name, informer := range f.informers {
if f.startedInformers[name] {
// skip informers that have already started.
log.Log.Infof("SKIPPING informer %s", name)
continue
}
log.Log.Infof("STARTING informer %s", name)
go informer.Run(stopCh)
f.startedInformers[name] = true
}
f.k8sInformers.Start(stopCh)
}
func (f *kubeInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) {
syncs := []cache.InformerSynced{}
f.lock.Lock()
for name, informer := range f.informers {
log.Log.Infof("Waiting for cache sync of informer %s", name)
syncs = append(syncs, informer.HasSynced)
}
f.lock.Unlock()
cache.WaitForCacheSync(stopCh, syncs...)
}
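// Example (hedged sketch): typical factory lifecycle in a controller binary.
// Informers must be requested before Start so they are registered; "stopCh" is a
// placeholder channel owned by the caller.
//
//	factory := NewKubeInformerFactory(restClient, clientSet, aggregatorClient, namespace)
//	vmiInformer := factory.VMI()
//	vmInformer := factory.VirtualMachine()
//	factory.Start(stopCh)
//	factory.WaitForCacheSync(stopCh)
//	// vmiInformer.GetStore() and vmiInformer.GetIndexer() are now safe to read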
// internal function used to retrieve an already created informer
// or create a new informer if one does not already exist.
// Thread safe
func (f *kubeInformerFactory) getInformer(key string, newFunc newSharedInformer) cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informer, exists := f.informers[key]
if exists {
return informer
}
informer = newFunc()
f.informers[key] = informer
return informer
}
func (f *kubeInformerFactory) Namespace() cache.SharedIndexInformer {
return f.getInformer("namespaceInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.CoreV1().RESTClient(), "namespaces", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(
lw,
&k8sv1.Namespace{},
f.defaultResync,
cache.Indexers{
"namespace_name": func(obj interface{}) ([]string, error) {
return []string{obj.(*k8sv1.Namespace).GetName()}, nil
},
},
)
})
}
func GetVMIInformerIndexers() cache.Indexers {
return cache.Indexers{
cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
"node": func(obj interface{}) (strings []string, e error) {
return []string{obj.(*kubev1.VirtualMachineInstance).Status.NodeName}, nil
},
"dv": func(obj interface{}) ([]string, error) {
vmi, ok := obj.(*kubev1.VirtualMachineInstance)
if !ok {
return nil, unexpectedObjectError
}
var dvs []string
for _, vol := range vmi.Spec.Volumes {
if vol.DataVolume != nil {
dvs = append(dvs, fmt.Sprintf("%s/%s", vmi.Namespace, vol.DataVolume.Name))
}
}
return dvs, nil
},
"pvc": func(obj interface{}) ([]string, error) {
vmi, ok := obj.(*kubev1.VirtualMachineInstance)
if !ok {
return nil, unexpectedObjectError
}
var pvcs []string
for _, vol := range vmi.Spec.Volumes {
if vol.PersistentVolumeClaim != nil {
pvcs = append(pvcs, fmt.Sprintf("%s/%s", vmi.Namespace, vol.PersistentVolumeClaim.ClaimName))
}
}
return pvcs, nil
},
}
}
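// Example (hedged sketch): the "dv" and "pvc" indexers enable reverse lookups from a
// storage object to the VMIs referencing it; "vmiInformer", "namespace" and "dvName"
// are placeholders.
//
//	objs, err := vmiInformer.GetIndexer().ByIndex("dv", NamespacedKey(namespace, dvName))
//	if err != nil {
//		return err
//	}
//	for _, obj := range objs {
//		vmi := obj.(*kubev1.VirtualMachineInstance)
//		// requeue or reconcile the affected VMI
//		_ = vmi
//	}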
func (f *kubeInformerFactory) VMI() cache.SharedIndexInformer {
return f.getInformer("vmiInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.restClient, "virtualmachineinstances", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &kubev1.VirtualMachineInstance{}, f.defaultResync, GetVMIInformerIndexers())
})
}
func (f *kubeInformerFactory) VMISourceHost(hostName string) cache.SharedIndexInformer {
labelSelector, err := labels.Parse(fmt.Sprintf(kubev1.NodeNameLabel+" in (%s)", hostName))
if err != nil {
panic(err)
}
return f.getInformer("vmiInformer-sources", func() cache.SharedIndexInformer {
lw := NewListWatchFromClient(f.restClient, "virtualmachineinstances", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &kubev1.VirtualMachineInstance{}, f.defaultResync, cache.Indexers{
cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
"node": func(obj interface{}) (strings []string, e error) {
return []string{obj.(*kubev1.VirtualMachineInstance).Status.NodeName}, nil
},
})
})
}
func (f *kubeInformerFactory) VMITargetHost(hostName string) cache.SharedIndexInformer {
labelSelector, err := labels.Parse(fmt.Sprintf(kubev1.MigrationTargetNodeNameLabel+" in (%s)", hostName))
if err != nil {
panic(err)
}
return f.getInformer("vmiInformer-targets", func() cache.SharedIndexInformer {
lw := NewListWatchFromClient(f.restClient, "virtualmachineinstances", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &kubev1.VirtualMachineInstance{}, f.defaultResync, cache.Indexers{
cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
"node": func(obj interface{}) (strings []string, e error) {
return []string{obj.(*kubev1.VirtualMachineInstance).Status.NodeName}, nil
},
})
})
}
func (f *kubeInformerFactory) VMIReplicaSet() cache.SharedIndexInformer {
return f.getInformer("vmirsInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.restClient, "virtualmachineinstancereplicasets", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &kubev1.VirtualMachineInstanceReplicaSet{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) VMPool() cache.SharedIndexInformer {
return f.getInformer("vmpool", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.GeneratedKubeVirtClient().PoolV1beta1().RESTClient(), "virtualmachinepools", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &poolv1.VirtualMachinePool{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) VirtualMachinePreset() cache.SharedIndexInformer {
return f.getInformer("vmiPresetInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.restClient, "virtualmachineinstancepresets", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &kubev1.VirtualMachineInstancePreset{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func GetVirtualMachineInstanceMigrationInformerIndexers() cache.Indexers {
return cache.Indexers{
cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
ByVMINameIndex: func(obj interface{}) ([]string, error) {
migration, ok := obj.(*kubev1.VirtualMachineInstanceMigration)
if !ok {
return nil, nil
}
return []string{fmt.Sprintf("%s/%s", migration.Namespace, migration.Spec.VMIName)}, nil
},
ByMigrationUIDIndex: func(obj interface{}) ([]string, error) {
migration, ok := obj.(*kubev1.VirtualMachineInstanceMigration)
if !ok {
return nil, nil
}
return []string{string(migration.UID)}, nil
},
UnfinishedIndex: func(obj interface{}) ([]string, error) {
migration, ok := obj.(*kubev1.VirtualMachineInstanceMigration)
if !ok {
return nil, nil
}
if !migration.IsFinal() {
return []string{UnfinishedIndex}, nil
}
return nil, nil
},
}
}
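// Example (hedged sketch): looking up the unfinished migrations that target a VMI via
// the indexes above; "migrationInformer" is a placeholder for the informer returned
// by VirtualMachineInstanceMigration().
//
//	objs, err := migrationInformer.GetIndexer().ByIndex(ByVMINameIndex, NamespacedKey(vmi.Namespace, vmi.Name))
//	if err != nil {
//		return err
//	}
//	for _, obj := range objs {
//		migration := obj.(*kubev1.VirtualMachineInstanceMigration)
//		if !migration.IsFinal() {
//			// an active migration exists for this VMI
//		}
//	}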
func (f *kubeInformerFactory) VirtualMachineInstanceMigration() cache.SharedIndexInformer {
return f.getInformer("vmimInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.restClient, "virtualmachineinstancemigrations", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &kubev1.VirtualMachineInstanceMigration{}, f.defaultResync, GetVirtualMachineInstanceMigrationInformerIndexers())
})
}
func (f *kubeInformerFactory) KubeVirtPod() cache.SharedIndexInformer {
return f.getInformer("kubeVirtPodInformer", func() cache.SharedIndexInformer {
// Watch all pods with the kubevirt app label
labelSelector, err := labels.Parse(kubev1.AppLabel)
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.clientSet.CoreV1().RESTClient(), "pods", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &k8sv1.Pod{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) KubeVirtNode() cache.SharedIndexInformer {
return f.getInformer("kubeVirtNodeInformer", func() cache.SharedIndexInformer {
lw := NewListWatchFromClient(f.clientSet.CoreV1().RESTClient(), "nodes", k8sv1.NamespaceAll, fields.Everything(), labels.Everything())
return cache.NewSharedIndexInformer(lw, &k8sv1.Node{}, f.defaultResync, cache.Indexers{})
})
}
func GetVirtualMachineInformerIndexers() cache.Indexers {
return cache.Indexers{
cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
"dv": func(obj interface{}) ([]string, error) {
vm, ok := obj.(*kubev1.VirtualMachine)
if !ok {
return nil, unexpectedObjectError
}
var dvs []string
for _, vol := range vm.Spec.Template.Spec.Volumes {
if vol.DataVolume != nil {
dvs = append(dvs, fmt.Sprintf("%s/%s", vm.Namespace, vol.DataVolume.Name))
}
}
return dvs, nil
},
"pvc": func(obj interface{}) ([]string, error) {
vm, ok := obj.(*kubev1.VirtualMachine)
if !ok {
return nil, unexpectedObjectError
}
var pvcs []string
for _, vol := range vm.Spec.Template.Spec.Volumes {
if vol.PersistentVolumeClaim != nil {
pvcs = append(pvcs, fmt.Sprintf("%s/%s", vm.Namespace, vol.PersistentVolumeClaim.ClaimName))
}
}
return pvcs, nil
},
}
}
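// Illustrative sketch (assumed variables, not from the original source): the
// "dv" and "pvc" indexes above map a DataVolume or PVC "namespace/name" key
// back to the VirtualMachines that reference it, so storage events can requeue
// only the affected VMs.
//
//	vms, err := vmInformer.GetIndexer().ByIndex("dv", "default/my-datavolume")
//	if err == nil && len(vms) > 0 {
//		vm := vms[0].(*kubev1.VirtualMachine)
//		_ = vm // requeue this VM for reconciliation
//	}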
func (f *kubeInformerFactory) VirtualMachine() cache.SharedIndexInformer {
return f.getInformer("vmInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.restClient, "virtualmachines", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &kubev1.VirtualMachine{}, f.defaultResync, GetVirtualMachineInformerIndexers())
})
}
func GetVirtualMachineBackupInformerIndexers() cache.Indexers {
return cache.Indexers{
cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
"vmi": func(obj interface{}) ([]string, error) {
backup, ok := obj.(*backupv1.VirtualMachineBackup)
if !ok {
return nil, unexpectedObjectError
}
if backup.Spec.Source.Kind == kubev1.VirtualMachineGroupVersionKind.Kind {
return []string{fmt.Sprintf("%s/%s", backup.Namespace, backup.Spec.Source.Name)}, nil
}
return nil, nil
},
"backupTracker": func(obj interface{}) ([]string, error) {
backup, ok := obj.(*backupv1.VirtualMachineBackup)
if !ok {
return nil, unexpectedObjectError
}
if backup.Spec.Source.Kind == backupv1.VirtualMachineBackupTrackerGroupVersionKind.Kind {
return []string{fmt.Sprintf("%s/%s", backup.Namespace, backup.Spec.Source.Name)}, nil
}
return nil, nil
},
}
}
func (f *kubeInformerFactory) VirtualMachineBackup() cache.SharedIndexInformer {
return f.getInformer("vmBackupInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.GeneratedKubeVirtClient().BackupV1alpha1().RESTClient(), "virtualmachinebackups", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &backupv1.VirtualMachineBackup{}, f.defaultResync, GetVirtualMachineBackupInformerIndexers())
})
}
func GetVirtualMachineBackupTrackerInformerIndexers() cache.Indexers {
return cache.Indexers{
cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
"vmi": func(obj interface{}) ([]string, error) {
tracker, ok := obj.(*backupv1.VirtualMachineBackupTracker)
if !ok {
return nil, unexpectedObjectError
}
source := tracker.Spec.Source
if source.APIGroup != nil &&
*source.APIGroup == core.GroupName &&
source.Kind == "VirtualMachine" {
return []string{fmt.Sprintf("%s/%s", tracker.Namespace, source.Name)}, nil
}
return nil, nil
},
}
}
func (f *kubeInformerFactory) VirtualMachineBackupTracker() cache.SharedIndexInformer {
return f.getInformer("vmBackupTrackerInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.GeneratedKubeVirtClient().BackupV1alpha1().RESTClient(), "virtualmachinebackuptrackers", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &backupv1.VirtualMachineBackupTracker{}, f.defaultResync, GetVirtualMachineBackupTrackerInformerIndexers())
})
}
func GetVirtualMachineExportInformerIndexers() cache.Indexers {
return cache.Indexers{
"pvc": func(obj interface{}) ([]string, error) {
export, ok := obj.(*exportv1.VirtualMachineExport)
if !ok {
return nil, unexpectedObjectError
}
if (export.Spec.Source.APIGroup == nil ||
*export.Spec.Source.APIGroup == "" || *export.Spec.Source.APIGroup == "v1") &&
export.Spec.Source.Kind == "PersistentVolumeClaim" {
return []string{fmt.Sprintf("%s/%s", export.Namespace, export.Spec.Source.Name)}, nil
}
return nil, nil
},
"vmsnapshot": func(obj interface{}) ([]string, error) {
export, ok := obj.(*exportv1.VirtualMachineExport)
if !ok {
return nil, unexpectedObjectError
}
if export.Spec.Source.APIGroup != nil &&
*export.Spec.Source.APIGroup == snapshotv1.SchemeGroupVersion.Group &&
export.Spec.Source.Kind == "VirtualMachineSnapshot" {
return []string{fmt.Sprintf("%s/%s", export.Namespace, export.Spec.Source.Name)}, nil
}
return nil, nil
},
"vm": func(obj interface{}) ([]string, error) {
export, ok := obj.(*exportv1.VirtualMachineExport)
if !ok {
return nil, unexpectedObjectError
}
if export.Spec.Source.APIGroup != nil &&
*export.Spec.Source.APIGroup == core.GroupName &&
export.Spec.Source.Kind == "VirtualMachine" {
return []string{fmt.Sprintf("%s/%s", export.Namespace, export.Spec.Source.Name)}, nil
}
return nil, nil
},
}
}
func (f *kubeInformerFactory) VirtualMachineExport() cache.SharedIndexInformer {
return f.getInformer("vmExportInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.GeneratedKubeVirtClient().ExportV1beta1().RESTClient(), "virtualmachineexports", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &exportv1.VirtualMachineExport{}, f.defaultResync, GetVirtualMachineExportInformerIndexers())
})
}
func GetVirtualMachineSnapshotInformerIndexers() cache.Indexers {
return cache.Indexers{
"vm": func(obj interface{}) ([]string, error) {
vms, ok := obj.(*snapshotv1.VirtualMachineSnapshot)
if !ok {
return nil, unexpectedObjectError
}
if vms.Spec.Source.APIGroup != nil &&
*vms.Spec.Source.APIGroup == core.GroupName &&
vms.Spec.Source.Kind == "VirtualMachine" {
return []string{fmt.Sprintf("%s/%s", vms.Namespace, vms.Spec.Source.Name)}, nil
}
return nil, nil
},
}
}
func (f *kubeInformerFactory) VirtualMachineSnapshot() cache.SharedIndexInformer {
return f.getInformer("vmSnapshotInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.GeneratedKubeVirtClient().SnapshotV1beta1().RESTClient(), "virtualmachinesnapshots", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &snapshotv1.VirtualMachineSnapshot{}, f.defaultResync, GetVirtualMachineSnapshotInformerIndexers())
})
}
func GetVirtualMachineSnapshotContentInformerIndexers() cache.Indexers {
return cache.Indexers{
"volumeSnapshot": func(obj interface{}) ([]string, error) {
vmsc, ok := obj.(*snapshotv1.VirtualMachineSnapshotContent)
if !ok {
return nil, unexpectedObjectError
}
var volumeSnapshots []string
for _, v := range vmsc.Spec.VolumeBackups {
if v.VolumeSnapshotName != nil {
k := fmt.Sprintf("%s/%s", vmsc.Namespace, *v.VolumeSnapshotName)
volumeSnapshots = append(volumeSnapshots, k)
}
}
return volumeSnapshots, nil
},
}
}
func (f *kubeInformerFactory) VirtualMachineSnapshotContent() cache.SharedIndexInformer {
return f.getInformer("vmSnapshotContentInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.GeneratedKubeVirtClient().SnapshotV1beta1().RESTClient(), "virtualmachinesnapshotcontents", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &snapshotv1.VirtualMachineSnapshotContent{}, f.defaultResync, GetVirtualMachineSnapshotContentInformerIndexers())
})
}
func GetVirtualMachineRestoreInformerIndexers() cache.Indexers {
return cache.Indexers{
cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
"vm": func(obj interface{}) ([]string, error) {
vmr, ok := obj.(*snapshotv1.VirtualMachineRestore)
if !ok {
return nil, unexpectedObjectError
}
if vmr.Spec.Target.APIGroup != nil &&
*vmr.Spec.Target.APIGroup == core.GroupName &&
vmr.Spec.Target.Kind == "VirtualMachine" {
return []string{fmt.Sprintf("%s/%s", vmr.Namespace, vmr.Spec.Target.Name)}, nil
}
return nil, nil
},
}
}
func (f *kubeInformerFactory) VirtualMachineRestore() cache.SharedIndexInformer {
return f.getInformer("vmRestoreInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.GeneratedKubeVirtClient().SnapshotV1beta1().RESTClient(), "virtualmachinerestores", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &snapshotv1.VirtualMachineRestore{}, f.defaultResync, GetVirtualMachineRestoreInformerIndexers())
})
}
func (f *kubeInformerFactory) MigrationPolicy() cache.SharedIndexInformer {
return f.getInformer("migrationPolicyInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.GeneratedKubeVirtClient().MigrationsV1alpha1().RESTClient(), migrations.ResourceMigrationPolicies, k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &migrationsv1.MigrationPolicy{}, f.defaultResync, cache.Indexers{})
})
}
func GetVirtualMachineCloneInformerIndexers() cache.Indexers {
getkey := func(vmClone *clone.VirtualMachineClone, resourceName string) string {
return fmt.Sprintf("%s/%s", vmClone.Namespace, resourceName)
}
return cache.Indexers{
cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
// Gets: vm key. Returns: clones whose source or target is the specified vm
"vmSource": func(obj interface{}) ([]string, error) {
vmClone, ok := obj.(*clone.VirtualMachineClone)
if !ok {
return nil, unexpectedObjectError
}
source := vmClone.Spec.Source
if source != nil && source.APIGroup != nil && *source.APIGroup == core.GroupName && source.Kind == "VirtualMachine" {
return []string{getkey(vmClone, source.Name)}, nil
}
return nil, nil
},
"vmTarget": func(obj interface{}) ([]string, error) {
vmClone, ok := obj.(*clone.VirtualMachineClone)
if !ok {
return nil, unexpectedObjectError
}
target := vmClone.Spec.Target
if target != nil && target.APIGroup != nil && *target.APIGroup == core.GroupName && target.Kind == "VirtualMachine" {
return []string{getkey(vmClone, target.Name)}, nil
}
return nil, nil
},
// Gets: snapshot key. Returns: clones whose source is the specified snapshot
"snapshotSource": func(obj interface{}) ([]string, error) {
vmClone, ok := obj.(*clone.VirtualMachineClone)
if !ok {
return nil, unexpectedObjectError
}
source := vmClone.Spec.Source
if source != nil && source.APIGroup != nil && *source.APIGroup == snapshot.GroupName && source.Kind == "VirtualMachineSnapshot" {
return []string{getkey(vmClone, source.Name)}, nil
}
return nil, nil
},
// Gets: snapshot key. Returns: clones in phase SnapshotInProgress that wait for the specified snapshot
string(clone.SnapshotInProgress): func(obj interface{}) ([]string, error) {
vmClone, ok := obj.(*clone.VirtualMachineClone)
if !ok {
return nil, unexpectedObjectError
}
if vmClone.Status.Phase == clone.SnapshotInProgress && vmClone.Status.SnapshotName != nil {
return []string{getkey(vmClone, *vmClone.Status.SnapshotName)}, nil
}
return nil, nil
},
// Gets: restore key. Returns: clones in phase RestoreInProgress that wait for the specified restore
string(clone.RestoreInProgress): func(obj interface{}) ([]string, error) {
vmClone, ok := obj.(*clone.VirtualMachineClone)
if !ok {
return nil, unexpectedObjectError
}
if vmClone.Status.Phase == clone.RestoreInProgress && vmClone.Status.RestoreName != nil {
return []string{getkey(vmClone, *vmClone.Status.RestoreName)}, nil
}
return nil, nil
},
// Gets: restore key. Returns: clones in phase Succeeded
string(clone.Succeeded): func(obj interface{}) ([]string, error) {
vmClone, ok := obj.(*clone.VirtualMachineClone)
if !ok {
return nil, unexpectedObjectError
}
if vmClone.Status.Phase == clone.Succeeded && vmClone.Status.RestoreName != nil {
return []string{getkey(vmClone, *vmClone.Status.RestoreName)}, nil
}
return nil, nil
},
}
}
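// Illustrative sketch (assumed usage): the phase-keyed indexes above let a
// snapshot or restore event wake up exactly the clones waiting on it. The
// cloneInformer variable is an assumption for the example.
//
//	waiting, err := cloneInformer.GetIndexer().ByIndex(string(clone.SnapshotInProgress), "default/my-snapshot")
//	if err == nil {
//		for _, obj := range waiting {
//			vmClone := obj.(*clone.VirtualMachineClone)
//			_ = vmClone // requeue now that the awaited snapshot changed
//		}
//	}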
func (f *kubeInformerFactory) VirtualMachineClone() cache.SharedIndexInformer {
return f.getInformer("virtualMachineCloneInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.GeneratedKubeVirtClient().CloneV1beta1().RESTClient(), clonebase.ResourceVMClonePlural, k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &clone.VirtualMachineClone{}, f.defaultResync, GetVirtualMachineCloneInformerIndexers())
})
}
func (f *kubeInformerFactory) VirtualMachineInstancetype() cache.SharedIndexInformer {
return f.getInformer("vmInstancetypeInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.GeneratedKubeVirtClient().InstancetypeV1beta1().RESTClient(), instancetypeapi.PluralResourceName, k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &instancetypev1beta1.VirtualMachineInstancetype{}, f.defaultResync, cache.Indexers{})
})
}
func (f *kubeInformerFactory) VirtualMachineClusterInstancetype() cache.SharedIndexInformer {
return f.getInformer("vmClusterInstancetypeInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.GeneratedKubeVirtClient().InstancetypeV1beta1().RESTClient(), instancetypeapi.ClusterPluralResourceName, k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &instancetypev1beta1.VirtualMachineClusterInstancetype{}, f.defaultResync, cache.Indexers{})
})
}
func (f *kubeInformerFactory) VirtualMachinePreference() cache.SharedIndexInformer {
return f.getInformer("vmPreferenceInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.GeneratedKubeVirtClient().InstancetypeV1beta1().RESTClient(), instancetypeapi.PluralPreferenceResourceName, k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &instancetypev1beta1.VirtualMachinePreference{}, f.defaultResync, cache.Indexers{})
})
}
func (f *kubeInformerFactory) VirtualMachineClusterPreference() cache.SharedIndexInformer {
return f.getInformer("vmClusterPreferenceInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.GeneratedKubeVirtClient().InstancetypeV1beta1().RESTClient(), instancetypeapi.ClusterPluralPreferenceResourceName, k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &instancetypev1beta1.VirtualMachineClusterPreference{}, f.defaultResync, cache.Indexers{})
})
}
func (f *kubeInformerFactory) DataVolume() cache.SharedIndexInformer {
return f.getInformer("dataVolumeInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.CdiClient().CdiV1beta1().RESTClient(), "datavolumes", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &cdiv1.DataVolume{}, f.defaultResync, cache.Indexers{})
})
}
func (f *kubeInformerFactory) DummyDataVolume() cache.SharedIndexInformer {
return f.getInformer("fakeDataVolumeInformer", func() cache.SharedIndexInformer {
informer, _ := testutils.NewFakeInformerFor(&cdiv1.DataVolume{})
return informer
})
}
func (f *kubeInformerFactory) DataSource() cache.SharedIndexInformer {
return f.getInformer("dataSourceInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.CdiClient().CdiV1beta1().RESTClient(), "datasources", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &cdiv1.DataSource{}, f.defaultResync, cache.Indexers{})
})
}
func (f *kubeInformerFactory) DummyDataSource() cache.SharedIndexInformer {
return f.getInformer("fakeDataSourceInformer", func() cache.SharedIndexInformer {
informer, _ := testutils.NewFakeInformerFor(&cdiv1.DataSource{})
return informer
})
}
func (f *kubeInformerFactory) StorageProfile() cache.SharedIndexInformer {
return f.getInformer("storageProfileInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.CdiClient().CdiV1beta1().RESTClient(), "storageprofiles", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &cdiv1.StorageProfile{}, f.defaultResync, cache.Indexers{})
})
}
func (f *kubeInformerFactory) DummyStorageProfile() cache.SharedIndexInformer {
return f.getInformer("fakeStorageProfileInformer", func() cache.SharedIndexInformer {
informer, _ := testutils.NewFakeInformerFor(&cdiv1.StorageProfile{})
return informer
})
}
func (f *kubeInformerFactory) CDI() cache.SharedIndexInformer {
return f.getInformer("cdiInformer", func() cache.SharedIndexInformer {
restClient := f.clientSet.CdiClient().CdiV1beta1().RESTClient()
lw := cache.NewListWatchFromClient(restClient, "cdis", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &cdiv1.CDI{}, f.defaultResync, cache.Indexers{})
})
}
func (f *kubeInformerFactory) DummyCDI() cache.SharedIndexInformer {
return f.getInformer("fakeCdiInformer", func() cache.SharedIndexInformer {
informer, _ := testutils.NewFakeInformerFor(&cdiv1.CDI{})
return informer
})
}
func (f *kubeInformerFactory) CDIConfig() cache.SharedIndexInformer {
return f.getInformer("cdiConfigInformer", func() cache.SharedIndexInformer {
restClient := f.clientSet.CdiClient().CdiV1beta1().RESTClient()
lw := cache.NewListWatchFromClient(restClient, "cdiconfigs", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &cdiv1.CDIConfig{}, f.defaultResync, cache.Indexers{})
})
}
func (f *kubeInformerFactory) DummyCDIConfig() cache.SharedIndexInformer {
return f.getInformer("fakeCdiConfigInformer", func() cache.SharedIndexInformer {
informer, _ := testutils.NewFakeInformerFor(&cdiv1.CDIConfig{})
return informer
})
}
func (f *kubeInformerFactory) Leases() cache.SharedIndexInformer {
return f.getInformer("leasesInformer", func() cache.SharedIndexInformer {
restClient := f.clientSet.CoordinationV1().RESTClient()
lw := cache.NewListWatchFromClient(restClient, "leases", f.kubevirtNamespace, fields.Everything())
return cache.NewSharedIndexInformer(lw, &coordinationv1.Lease{}, f.defaultResync, cache.Indexers{})
})
}
func (f *kubeInformerFactory) DummyLeases() cache.SharedIndexInformer {
return f.getInformer("fakeLeasesInformer", func() cache.SharedIndexInformer {
informer, _ := testutils.NewFakeInformerFor(&coordinationv1.Lease{})
return informer
})
}
func (f *kubeInformerFactory) ApiAuthConfigMap() cache.SharedIndexInformer {
return f.getInformer("extensionsConfigMapInformer", func() cache.SharedIndexInformer {
restClient := f.clientSet.CoreV1().RESTClient()
fieldSelector := fields.OneTermEqualSelector("metadata.name", "extension-apiserver-authentication")
lw := cache.NewListWatchFromClient(restClient, "configmaps", metav1.NamespaceSystem, fieldSelector)
return cache.NewSharedIndexInformer(lw, &k8sv1.ConfigMap{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) KubeVirtCAConfigMap() cache.SharedIndexInformer {
return f.getInformer("extensionsKubeVirtCAConfigMapInformer", func() cache.SharedIndexInformer {
restClient := f.clientSet.CoreV1().RESTClient()
fieldSelector := fields.OneTermEqualSelector("metadata.name", "kubevirt-ca")
lw := cache.NewListWatchFromClient(restClient, "configmaps", f.kubevirtNamespace, fieldSelector)
return cache.NewSharedIndexInformer(lw, &k8sv1.ConfigMap{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) KubeVirtExportCAConfigMap() cache.SharedIndexInformer {
return f.getInformer("extensionsKubeVirtExportCAConfigMapInformer", func() cache.SharedIndexInformer {
restClient := f.clientSet.CoreV1().RESTClient()
fieldSelector := fields.OneTermEqualSelector("metadata.name", "kubevirt-export-ca")
lw := cache.NewListWatchFromClient(restClient, "configmaps", f.kubevirtNamespace, fieldSelector)
return cache.NewSharedIndexInformer(lw, &k8sv1.ConfigMap{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) ExportRouteConfigMap() cache.SharedIndexInformer {
return f.getInformer("extensionsExportRouteConfigMapInformer", func() cache.SharedIndexInformer {
restClient := f.clientSet.CoreV1().RESTClient()
fieldSelector := fields.OneTermEqualSelector("metadata.name", "kube-root-ca.crt")
lw := cache.NewListWatchFromClient(restClient, "configmaps", f.kubevirtNamespace, fieldSelector)
return cache.NewSharedIndexInformer(lw, &k8sv1.ConfigMap{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) ExportService() cache.SharedIndexInformer {
return f.getInformer("exportService", func() cache.SharedIndexInformer {
// Watch all services with the kubevirt app label
labelSelector, err := labels.Parse(fmt.Sprintf("%s=%s", kubev1.AppLabel, exportv1.App))
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.clientSet.CoreV1().RESTClient(), "services", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &k8sv1.Service{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) PersistentVolumeClaim() cache.SharedIndexInformer {
return f.getInformer("persistentVolumeClaimInformer", func() cache.SharedIndexInformer {
restClient := f.clientSet.CoreV1().RESTClient()
lw := cache.NewListWatchFromClient(restClient, "persistentvolumeclaims", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &k8sv1.PersistentVolumeClaim{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func GetControllerRevisionInformerIndexers() cache.Indexers {
return cache.Indexers{
"vm": func(obj interface{}) ([]string, error) {
cr, ok := obj.(*appsv1.ControllerRevision)
if !ok {
return nil, unexpectedObjectError
}
for _, ref := range cr.OwnerReferences {
if ref.Kind == "VirtualMachine" {
return []string{string(ref.UID)}, nil
}
}
return nil, nil
},
"vmpool": func(obj interface{}) ([]string, error) {
cr, ok := obj.(*appsv1.ControllerRevision)
if !ok {
return nil, unexpectedObjectError
}
for _, ref := range cr.OwnerReferences {
if ref.Kind == "VirtualMachinePool" {
return []string{string(ref.UID)}, nil
}
}
return nil, nil
},
}
}
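// Illustrative sketch (assumed variables): unlike the other indexes in this
// file, the "vm" and "vmpool" ControllerRevision indexes key by owner UID
// rather than "namespace/name", so lookups pass the owning object's UID.
//
//	revs, err := crInformer.GetIndexer().ByIndex("vm", string(vm.UID))
//	if err == nil {
//		_ = revs // ControllerRevisions owned by this VirtualMachine
//	}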
func (f *kubeInformerFactory) ControllerRevision() cache.SharedIndexInformer {
return f.getInformer("controllerRevisionInformer", func() cache.SharedIndexInformer {
restClient := f.clientSet.AppsV1().RESTClient()
lw := cache.NewListWatchFromClient(restClient, "controllerrevisions", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &appsv1.ControllerRevision{}, f.defaultResync, GetControllerRevisionInformerIndexers())
})
}
func (f *kubeInformerFactory) KubeVirt() cache.SharedIndexInformer {
return f.getInformer("kubeVirtInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.restClient, "kubevirts", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &kubev1.KubeVirt{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
// ResyncPeriod computes the time interval a shared informer waits before resyncing with the api server
func ResyncPeriod(minResyncPeriod time.Duration) time.Duration {
// #nosec no need for better randomness
factor := rand.Float64() + 1
return time.Duration(float64(minResyncPeriod.Nanoseconds()) * factor)
}
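// Illustrative sketch (not part of the original source): because the factor is
// drawn from [1, 2), the returned period falls in [minResyncPeriod,
// 2*minResyncPeriod), which spreads the resyncs of informers created together
// so they do not all hit the API server at once.
//
//	resync := ResyncPeriod(12 * time.Hour) // some duration in [12h, 24h)
//	_ = resync                             // e.g. used as the factory's defaultResync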
func (f *kubeInformerFactory) OperatorServiceAccount() cache.SharedIndexInformer {
return f.getInformer("OperatorServiceAccountInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(OperatorLabel)
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.clientSet.CoreV1().RESTClient(), "serviceaccounts", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &k8sv1.ServiceAccount{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) OperatorConfigMap() cache.SharedIndexInformer {
// filter out install strategies
return f.getInformer("OperatorConfigMapInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(fmt.Sprintf("!%s, %s", kubev1.InstallStrategyLabel, OperatorLabel))
if err != nil {
panic(err)
}
restClient := f.clientSet.CoreV1().RESTClient()
lw := NewListWatchFromClient(restClient, "configmaps", f.kubevirtNamespace, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &k8sv1.ConfigMap{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) OperatorClusterRole() cache.SharedIndexInformer {
return f.getInformer("OperatorClusterRoleInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(OperatorLabel)
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.clientSet.RbacV1().RESTClient(), "clusterroles", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &rbacv1.ClusterRole{}, f.defaultResync, cache.Indexers{})
})
}
func (f *kubeInformerFactory) OperatorClusterRoleBinding() cache.SharedIndexInformer {
return f.getInformer("OperatorClusterRoleBindingInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(OperatorLabel)
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.clientSet.RbacV1().RESTClient(), "clusterrolebindings", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &rbacv1.ClusterRoleBinding{}, f.defaultResync, cache.Indexers{})
})
}
func (f *kubeInformerFactory) OperatorRole() cache.SharedIndexInformer {
return f.getInformer("OperatorRoleInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(OperatorLabel)
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.clientSet.RbacV1().RESTClient(), "roles", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &rbacv1.Role{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) OperatorRoleBinding() cache.SharedIndexInformer {
return f.getInformer("OperatorRoleBindingInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(OperatorLabel)
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.clientSet.RbacV1().RESTClient(), "rolebindings", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &rbacv1.RoleBinding{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) OperatorCRD() cache.SharedIndexInformer {
return f.getInformer("OperatorCRDInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(OperatorLabel)
if err != nil {
panic(err)
}
ext, err := extclient.NewForConfig(f.clientSet.Config())
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(ext.ApiextensionsV1().RESTClient(), "customresourcedefinitions", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &extv1.CustomResourceDefinition{}, f.defaultResync, cache.Indexers{})
})
}
func (f *kubeInformerFactory) OperatorService() cache.SharedIndexInformer {
return f.getInformer("OperatorServiceInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(OperatorLabel)
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.clientSet.CoreV1().RESTClient(), "services", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &k8sv1.Service{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) OperatorDeployment() cache.SharedIndexInformer {
return f.getInformer("OperatorDeploymentInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(OperatorLabel)
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.clientSet.AppsV1().RESTClient(), "deployments", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &appsv1.Deployment{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) OperatorDaemonSet() cache.SharedIndexInformer {
return f.getInformer("OperatorDaemonSetInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(OperatorLabel)
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.clientSet.AppsV1().RESTClient(), "daemonsets", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &appsv1.DaemonSet{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) OperatorSCC() cache.SharedIndexInformer {
return f.getInformer("OperatorSCC", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.SecClient().RESTClient(), "securitycontextconstraints", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &secv1.SecurityContextConstraints{}, f.defaultResync, cache.Indexers{})
})
}
func (f *kubeInformerFactory) DummyOperatorSCC() cache.SharedIndexInformer {
return f.getInformer("FakeOperatorSCC", func() cache.SharedIndexInformer {
informer, _ := testutils.NewFakeInformerFor(&secv1.SecurityContextConstraints{})
return informer
})
}
func (f *kubeInformerFactory) Ingress() cache.SharedIndexInformer {
return f.getInformer("Ingress", func() cache.SharedIndexInformer {
restClient := f.clientSet.NetworkingV1().RESTClient()
lw := cache.NewListWatchFromClient(restClient, "ingresses", f.kubevirtNamespace, fields.Everything())
return cache.NewSharedIndexInformer(lw, &networkingv1.Ingress{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) OperatorRoute() cache.SharedIndexInformer {
return f.getInformer("OperatorRoute", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(OperatorLabel)
if err != nil {
panic(err)
}
restClient := f.clientSet.RouteClient().RESTClient()
lw := NewListWatchFromClient(restClient, "routes", f.kubevirtNamespace, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &routev1.Route{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) DummyOperatorRoute() cache.SharedIndexInformer {
return f.getInformer("FakeOperatorRoute", func() cache.SharedIndexInformer {
informer, _ := testutils.NewFakeInformerFor(&routev1.Route{})
return informer
})
}
func (f *kubeInformerFactory) OperatorInstallStrategyConfigMaps() cache.SharedIndexInformer {
return f.getInformer("installStrategyConfigMapInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(kubev1.InstallStrategyLabel)
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.clientSet.CoreV1().RESTClient(), "configmaps", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &k8sv1.ConfigMap{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) OperatorInstallStrategyJob() cache.SharedIndexInformer {
return f.getInformer("installStrategyJobsInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(kubev1.InstallStrategyLabel)
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.clientSet.BatchV1().RESTClient(), "jobs", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &batchv1.Job{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) OperatorPod() cache.SharedIndexInformer {
return f.getInformer("operatorPodsInformer", func() cache.SharedIndexInformer {
// Watch all kubevirt infrastructure pods with the operator label
labelSelector, err := labels.Parse(OperatorLabel)
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.clientSet.CoreV1().RESTClient(), "pods", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &k8sv1.Pod{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) OperatorValidationWebhook() cache.SharedIndexInformer {
return f.getInformer("operatorValidatingWebhookInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(OperatorLabel)
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.clientSet.AdmissionregistrationV1().RESTClient(), "validatingwebhookconfigurations", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &admissionregistrationv1.ValidatingWebhookConfiguration{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) OperatorMutatingWebhook() cache.SharedIndexInformer {
return f.getInformer("operatorMutatingWebhookInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(OperatorLabel)
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.clientSet.AdmissionregistrationV1().RESTClient(), "mutatingwebhookconfigurations", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &admissionregistrationv1.MutatingWebhookConfiguration{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) Secrets() cache.SharedIndexInformer {
return f.getInformer("secretsInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(OperatorLabel)
if err != nil {
panic(err)
}
restClient := f.clientSet.CoreV1().RESTClient()
lw := NewListWatchFromClient(restClient, "secrets", f.kubevirtNamespace, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &k8sv1.Secret{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) UnmanagedSecrets() cache.SharedIndexInformer {
// Use a cache key distinct from Secrets() so the two informers are not conflated.
return f.getInformer("unmanagedSecretsInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(NotOperatorLabel)
if err != nil {
panic(err)
}
restClient := f.clientSet.CoreV1().RESTClient()
lw := NewListWatchFromClient(restClient, "secrets", f.kubevirtNamespace, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &k8sv1.Secret{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) OperatorAPIService() cache.SharedIndexInformer {
return f.getInformer("operatorAPIServiceInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(OperatorLabel)
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.aggregatorClient.ApiregistrationV1().RESTClient(), "apiservices", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &apiregv1.APIService{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) OperatorPodDisruptionBudget() cache.SharedIndexInformer {
return f.getInformer("operatorPodDisruptionBudgetInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(OperatorLabel)
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.clientSet.PolicyV1().RESTClient(), "poddisruptionbudgets", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &policyv1.PodDisruptionBudget{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) OperatorServiceMonitor() cache.SharedIndexInformer {
return f.getInformer("operatorServiceMonitorInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(OperatorLabel)
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.clientSet.PrometheusClient().MonitoringV1().RESTClient(), "servicemonitors", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &promv1.ServiceMonitor{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) DummyOperatorServiceMonitor() cache.SharedIndexInformer {
return f.getInformer("FakeOperatorServiceMonitor", func() cache.SharedIndexInformer {
informer, _ := testutils.NewFakeInformerFor(&promv1.ServiceMonitor{})
return informer
})
}
func (f *kubeInformerFactory) OperatorValidatingAdmissionPolicyBinding() cache.SharedIndexInformer {
return f.getInformer("operatorValidatingAdmissionPolicyBindingInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(OperatorLabel)
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.clientSet.AdmissionregistrationV1().RESTClient(), "validatingadmissionpolicybindings", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &admissionregistrationv1.ValidatingAdmissionPolicyBinding{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) DummyOperatorValidatingAdmissionPolicyBinding() cache.SharedIndexInformer {
return f.getInformer("FakeOperatorValidatingAdmissionPolicyBindingInformer", func() cache.SharedIndexInformer {
informer, _ := testutils.NewFakeInformerFor(&admissionregistrationv1.ValidatingAdmissionPolicyBinding{})
return informer
})
}
func (f *kubeInformerFactory) OperatorValidatingAdmissionPolicy() cache.SharedIndexInformer {
return f.getInformer("operatorValidatingAdmissionPolicyInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(OperatorLabel)
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.clientSet.AdmissionregistrationV1().RESTClient(), "validatingadmissionpolicies", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &admissionregistrationv1.ValidatingAdmissionPolicy{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) DummyOperatorValidatingAdmissionPolicy() cache.SharedIndexInformer {
return f.getInformer("FakeOperatorValidatingAdmissionPolicyInformer", func() cache.SharedIndexInformer {
informer, _ := testutils.NewFakeInformerFor(&admissionregistrationv1.ValidatingAdmissionPolicy{})
return informer
})
}
func (f *kubeInformerFactory) K8SInformerFactory() informers.SharedInformerFactory {
return f.k8sInformers
}
func (f *kubeInformerFactory) CRD() cache.SharedIndexInformer {
return f.getInformer("CRDInformer", func() cache.SharedIndexInformer {
ext, err := extclient.NewForConfig(f.clientSet.Config())
if err != nil {
panic(err)
}
restClient := ext.ApiextensionsV1().RESTClient()
lw := cache.NewListWatchFromClient(restClient, "customresourcedefinitions", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &extv1.CustomResourceDefinition{}, f.defaultResync, cache.Indexers{})
})
}
func (f *kubeInformerFactory) OperatorPrometheusRule() cache.SharedIndexInformer {
return f.getInformer("OperatorPrometheusRuleInformer", func() cache.SharedIndexInformer {
labelSelector, err := labels.Parse(OperatorLabel)
if err != nil {
panic(err)
}
lw := NewListWatchFromClient(f.clientSet.PrometheusClient().MonitoringV1().RESTClient(), "prometheusrules", k8sv1.NamespaceAll, fields.Everything(), labelSelector)
return cache.NewSharedIndexInformer(lw, &promv1.PrometheusRule{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) DummyOperatorPrometheusRule() cache.SharedIndexInformer {
return f.getInformer("FakeOperatorPrometheusRuleInformer", func() cache.SharedIndexInformer {
informer, _ := testutils.NewFakeInformerFor(&promv1.PrometheusRule{})
return informer
})
}
func (f *kubeInformerFactory) StorageClass() cache.SharedIndexInformer {
return f.getInformer("storageClassInformer", func() cache.SharedIndexInformer {
restClient := f.clientSet.StorageV1().RESTClient()
lw := cache.NewListWatchFromClient(restClient, "storageclasses", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &storagev1.StorageClass{}, f.defaultResync, cache.Indexers{})
})
}
func (f *kubeInformerFactory) Pod() cache.SharedIndexInformer {
return f.getInformer("podInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.CoreV1().RESTClient(), "pods", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &k8sv1.Pod{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) ResourceQuota() cache.SharedIndexInformer {
return f.getInformer("resourceQuotaInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.CoreV1().RESTClient(), "resourcequotas", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &k8sv1.ResourceQuota{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) ResourceClaim() cache.SharedIndexInformer {
return f.getInformer("resourceClaimInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.ResourceV1().RESTClient(), "resourceclaims", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &resourcev1.ResourceClaim{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) DummyResourceClaim() cache.SharedIndexInformer {
return f.getInformer("fakeResourceClaimInformer", func() cache.SharedIndexInformer {
informer, _ := testutils.NewFakeInformerFor(&resourcev1.ResourceClaim{})
return informer
})
}
func (f *kubeInformerFactory) ResourceSlice() cache.SharedIndexInformer {
return f.getInformer("resourceSliceInformer", func() cache.SharedIndexInformer {
lw := cache.NewListWatchFromClient(f.clientSet.ResourceV1().RESTClient(), "resourceslices", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &resourcev1.ResourceSlice{}, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
})
}
func (f *kubeInformerFactory) DummyResourceSlice() cache.SharedIndexInformer {
return f.getInformer("fakeResourceSliceInformer", func() cache.SharedIndexInformer {
informer, _ := testutils.NewFakeInformerFor(&resourcev1.ResourceSlice{})
return informer
})
}
// VolumeSnapshotInformer returns an informer for VolumeSnapshots
func VolumeSnapshotInformer(clientSet kubecli.KubevirtClient, resyncPeriod time.Duration) cache.SharedIndexInformer {
restClient := clientSet.KubernetesSnapshotClient().SnapshotV1().RESTClient()
lw := cache.NewListWatchFromClient(restClient, "volumesnapshots", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &vsv1.VolumeSnapshot{}, resyncPeriod, cache.Indexers{})
}
// VolumeSnapshotClassInformer returns an informer for VolumeSnapshotClasses
func VolumeSnapshotClassInformer(clientSet kubecli.KubevirtClient, resyncPeriod time.Duration) cache.SharedIndexInformer {
restClient := clientSet.KubernetesSnapshotClient().SnapshotV1().RESTClient()
lw := cache.NewListWatchFromClient(restClient, "volumesnapshotclasses", k8sv1.NamespaceAll, fields.Everything())
return cache.NewSharedIndexInformer(lw, &vsv1.VolumeSnapshotClass{}, resyncPeriod, cache.Indexers{})
}
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2021
*
*/
package defaults
import (
v1 "kubevirt.io/api/core/v1"
)
func setDefaultAmd64DisksBus(spec *v1.VirtualMachineInstanceSpec) {
// Setting SATA as the default bus since it is typically supported out of the box by
// guest operating systems (we support only q35 and therefore IDE is not supported)
// TODO: consider making this OS-specific (VIRTIO for linux, SATA for others)
bus := v1.DiskBusSATA
for i := range spec.Domain.Devices.Disks {
disk := &spec.Domain.Devices.Disks[i].DiskDevice
if disk.Disk != nil && disk.Disk.Bus == "" {
disk.Disk.Bus = bus
}
if disk.CDRom != nil && disk.CDRom.Bus == "" {
disk.CDRom.Bus = bus
}
if disk.LUN != nil && disk.LUN.Bus == "" {
disk.LUN.Bus = bus
}
}
}
// SetAmd64Defaults is a mutating function for the mutating webhook
func SetAmd64Defaults(spec *v1.VirtualMachineInstanceSpec) {
setDefaultAmd64DisksBus(spec)
SetAmd64Watchdog(spec)
}
func SetAmd64Watchdog(spec *v1.VirtualMachineInstanceSpec) {
if spec.Domain.Devices.Watchdog != nil {
if spec.Domain.Devices.Watchdog.I6300ESB == nil {
spec.Domain.Devices.Watchdog.I6300ESB = &v1.I6300ESBWatchdog{}
}
if spec.Domain.Devices.Watchdog.I6300ESB.Action == "" {
spec.Domain.Devices.Watchdog.I6300ESB.Action = v1.WatchdogActionReset
}
}
}
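// Illustrative sketch (assumed input, not from the original source): for a
// spec with one empty disk and a bare watchdog, SetAmd64Defaults fills in the
// SATA bus and the i6300esb reset action.
//
//	spec := &v1.VirtualMachineInstanceSpec{}
//	spec.Domain.Devices.Disks = []v1.Disk{{DiskDevice: v1.DiskDevice{Disk: &v1.DiskTarget{}}}}
//	spec.Domain.Devices.Watchdog = &v1.Watchdog{}
//	SetAmd64Defaults(spec)
//	// spec.Domain.Devices.Disks[0].Disk.Bus == v1.DiskBusSATA
//	// spec.Domain.Devices.Watchdog.I6300ESB.Action == v1.WatchdogActionReset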
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2021
*
*/
package defaults
import (
v1 "kubevirt.io/api/core/v1"
)
var _false bool = false
const (
defaultCPUModelArm64 = v1.CPUModeHostPassthrough
)
// setDefaultArm64CPUModel sets the default CPU model to host-passthrough
func setDefaultArm64CPUModel(spec *v1.VirtualMachineInstanceSpec) {
if spec.Domain.CPU == nil {
spec.Domain.CPU = &v1.CPU{}
}
if spec.Domain.CPU.Model == "" {
spec.Domain.CPU.Model = defaultCPUModelArm64
}
}
// setDefaultArm64Bootloader sets the default bootloader to UEFI
func setDefaultArm64Bootloader(spec *v1.VirtualMachineInstanceSpec) {
if spec.Domain.Firmware == nil || spec.Domain.Firmware.Bootloader == nil {
if spec.Domain.Firmware == nil {
spec.Domain.Firmware = &v1.Firmware{}
}
if spec.Domain.Firmware.Bootloader == nil {
spec.Domain.Firmware.Bootloader = &v1.Bootloader{}
}
spec.Domain.Firmware.Bootloader.EFI = &v1.EFI{}
spec.Domain.Firmware.Bootloader.EFI.SecureBoot = &_false
}
}
// setDefaultArm64DisksBus sets the default disk bus to virtio, because SATA is not supported by qemu-kvm on arm64
func setDefaultArm64DisksBus(spec *v1.VirtualMachineInstanceSpec) {
bus := v1.DiskBusVirtio
for i := range spec.Domain.Devices.Disks {
disk := &spec.Domain.Devices.Disks[i].DiskDevice
if disk.Disk != nil && disk.Disk.Bus == "" {
disk.Disk.Bus = bus
}
if disk.CDRom != nil && disk.CDRom.Bus == "" {
disk.CDRom.Bus = bus
}
if disk.LUN != nil && disk.LUN.Bus == "" {
disk.LUN.Bus = bus
}
}
}
// SetArm64Defaults is a mutating function for the mutating webhook
func SetArm64Defaults(spec *v1.VirtualMachineInstanceSpec) {
setDefaultArm64CPUModel(spec)
setDefaultArm64Bootloader(spec)
setDefaultArm64DisksBus(spec)
}
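// Illustrative sketch (assumed input): starting from an otherwise empty arm64
// spec, SetArm64Defaults yields a host-passthrough CPU model, EFI firmware
// with SecureBoot disabled, and virtio buses for any disks.
//
//	spec := &v1.VirtualMachineInstanceSpec{Architecture: "arm64"}
//	SetArm64Defaults(spec)
//	// spec.Domain.CPU.Model == v1.CPUModeHostPassthrough
//	// *spec.Domain.Firmware.Bootloader.EFI.SecureBoot == false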
func IsARM64(vmiSpec *v1.VirtualMachineInstanceSpec) bool {
return vmiSpec.Architecture == "arm64"
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package defaults
import (
"context"
"strings"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/liveupdate/memory"
"kubevirt.io/kubevirt/pkg/network/vmispec"
"kubevirt.io/kubevirt/pkg/util"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
func SetVirtualMachineDefaults(vm *v1.VirtualMachine, clusterConfig *virtconfig.ClusterConfig, virtClient kubecli.KubevirtClient) {
setDefaultArchitectureFromDataSource(clusterConfig, vm, virtClient)
setDefaultArchitecture(clusterConfig, &vm.Spec.Template.Spec)
setVMDefaultMachineType(vm, clusterConfig)
}
func setVMDefaultMachineType(vm *v1.VirtualMachine, clusterConfig *virtconfig.ClusterConfig) {
// Nothing to do, let the validating webhook fail later
if vm.Spec.Template == nil {
return
}
if machine := vm.Spec.Template.Spec.Domain.Machine; machine != nil && machine.Type != "" {
return
}
if vm.Spec.Template.Spec.Domain.Machine == nil {
vm.Spec.Template.Spec.Domain.Machine = &v1.Machine{}
}
if vm.Spec.Template.Spec.Domain.Machine.Type == "" {
vm.Spec.Template.Spec.Domain.Machine.Type = clusterConfig.GetMachineType(vm.Spec.Template.Spec.Architecture)
}
}
func SetDefaultVirtualMachineInstance(clusterConfig *virtconfig.ClusterConfig, vmi *v1.VirtualMachineInstance) error {
if err := SetDefaultVirtualMachineInstanceSpec(clusterConfig, &vmi.Spec); err != nil {
return err
}
setDefaultFeatures(&vmi.Spec)
v1.SetObjectDefaults_VirtualMachineInstance(vmi)
setDefaultHypervFeatureDependencies(&vmi.Spec)
setDefaultCPUArch(clusterConfig, &vmi.Spec)
setGuestMemoryStatus(vmi)
setCurrentCPUTopologyStatus(vmi)
// Hotplug is not supported on ARM64 yet
if !IsARM64(&vmi.Spec) {
setupHotplug(clusterConfig, vmi)
}
return nil
}
func setupHotplug(clusterConfig *virtconfig.ClusterConfig, vmi *v1.VirtualMachineInstance) {
if !clusterConfig.IsVMRolloutStrategyLiveUpdate() {
return
}
setupCPUHotplug(clusterConfig, vmi)
setupMemoryHotplug(clusterConfig, vmi)
}
func setupCPUHotplug(clusterConfig *virtconfig.ClusterConfig, vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.CPU.MaxSockets == 0 {
maxSockets := clusterConfig.GetMaximumCpuSockets()
if vmi.Spec.Domain.CPU.Sockets > maxSockets && maxSockets != 0 {
maxSockets = vmi.Spec.Domain.CPU.Sockets
}
vmi.Spec.Domain.CPU.MaxSockets = maxSockets
}
if vmi.Spec.Domain.CPU.MaxSockets == 0 {
// Each machine type has a different maximum number of vCPUs;
// let's choose 512 as an upper bound
const maxVCPUs = 512
vmi.Spec.Domain.CPU.MaxSockets = vmi.Spec.Domain.CPU.Sockets * clusterConfig.GetMaxHotplugRatio()
totalVCPUs := vmi.Spec.Domain.CPU.MaxSockets * vmi.Spec.Domain.CPU.Cores * vmi.Spec.Domain.CPU.Threads
if totalVCPUs > maxVCPUs {
adjustedSockets := maxVCPUs / (vmi.Spec.Domain.CPU.Cores * vmi.Spec.Domain.CPU.Threads)
vmi.Spec.Domain.CPU.MaxSockets = max(adjustedSockets, vmi.Spec.Domain.CPU.Sockets)
}
}
}
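// Worked example (illustrative, values assumed): with Sockets=2, Cores=1,
// Threads=1, no cluster-wide maximum, and a hotplug ratio of 4, the fallback
// branch sets MaxSockets = 2*4 = 8, for 8 vCPUs total. With Cores=64 the cap
// is exactly reached (8*64 = 512 vCPUs), while Cores=128 would clamp
// MaxSockets to max(512/128, 2) = 4.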
func setupMemoryHotplug(clusterConfig *virtconfig.ClusterConfig, vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.Memory.MaxGuest != nil {
return
}
var maxGuest *resource.Quantity
switch {
case clusterConfig.GetMaximumGuestMemory() != nil:
maxGuest = clusterConfig.GetMaximumGuestMemory()
case vmi.Spec.Domain.Memory.Guest != nil:
maxGuest = resource.NewQuantity(vmi.Spec.Domain.Memory.Guest.Value()*int64(clusterConfig.GetMaxHotplugRatio()), resource.BinarySI)
}
if err := memory.ValidateLiveUpdateMemory(&vmi.Spec, maxGuest); err != nil {
// memory hotplug is not compatible with this VM configuration
log.Log.V(2).Object(vmi).Infof("memory-hotplug disabled: %s", err)
return
}
vmi.Spec.Domain.Memory.MaxGuest = maxGuest
}
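// Worked example (illustrative, values assumed): with Guest=2Gi, no
// cluster-wide maximum, and a hotplug ratio of 4, maxGuest becomes 8Gi. If the
// live-update validation rejects the combination, MaxGuest stays unset and
// memory hotplug is effectively disabled for this VMI.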
func setCurrentCPUTopologyStatus(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.CPU != nil && vmi.Status.CurrentCPUTopology == nil {
vmi.Status.CurrentCPUTopology = &v1.CPUTopology{
Sockets: vmi.Spec.Domain.CPU.Sockets,
Cores: vmi.Spec.Domain.CPU.Cores,
Threads: vmi.Spec.Domain.CPU.Threads,
}
}
}
func setGuestMemoryStatus(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.Memory != nil &&
vmi.Spec.Domain.Memory.Guest != nil {
vmi.Status.Memory = &v1.MemoryStatus{
GuestAtBoot: vmi.Spec.Domain.Memory.Guest,
GuestCurrent: vmi.Spec.Domain.Memory.Guest,
GuestRequested: vmi.Spec.Domain.Memory.Guest,
}
}
}
func setDefaultFeatures(spec *v1.VirtualMachineInstanceSpec) {
if IsS390X(spec) {
setS390xDefaultFeatures(spec)
}
}
func setDefaultCPUArch(clusterConfig *virtconfig.ClusterConfig, spec *v1.VirtualMachineInstanceSpec) {
// Do some CPU arch specific setting.
switch {
case IsARM64(spec):
log.Log.V(4).Info("Apply Arm64 specific setting")
SetArm64Defaults(spec)
case IsS390X(spec):
log.Log.V(4).Info("Apply s390x specific setting")
SetS390xDefaults(spec)
default:
SetAmd64Defaults(spec)
}
setDefaultCPUModel(clusterConfig, spec)
}
func setDefaultHypervFeatureDependencies(spec *v1.VirtualMachineInstanceSpec) {
// In a future, yet undecided, release either libvirt or QEMU will check the hyperv dependencies, so we can get rid of this code.
// Until then, we need to handle the hyperv dependencies ourselves to avoid obscure rejections from QEMU later on
log.Log.V(4).Info("Set HyperV dependencies")
if err := SetHypervFeatureDependencies(spec); err != nil {
// HyperV is a special case. If our best-effort attempt fails, we should leave
// rejection to be performed later on in the validating webhook, and continue here.
// Please note this means that partial changes may have been performed.
// This is OK since each dependency must be atomic and independent (in ACID sense),
// so the VMI configuration is still legal.
log.Log.V(2).Infof("Failed to set HyperV dependencies: %s", err)
}
}
func SetDefaultVirtualMachineInstanceSpec(clusterConfig *virtconfig.ClusterConfig, spec *v1.VirtualMachineInstanceSpec) error {
setDefaultArchitecture(clusterConfig, spec)
setDefaultMachineType(clusterConfig, spec)
setDefaultResourceRequests(clusterConfig, spec)
setGuestMemory(spec)
SetDefaultGuestCPUTopology(clusterConfig, spec)
setDefaultPullPoliciesOnContainerDisks(spec)
setDefaultEvictionStrategy(clusterConfig, spec)
if err := vmispec.SetDefaultNetworkInterface(clusterConfig, spec); err != nil {
return err
}
util.SetDefaultVolumeDisk(spec)
return nil
}
func setDefaultEvictionStrategy(clusterConfig *virtconfig.ClusterConfig, spec *v1.VirtualMachineInstanceSpec) {
if spec.EvictionStrategy == nil {
spec.EvictionStrategy = clusterConfig.GetConfig().EvictionStrategy
}
}
func setDefaultMachineType(clusterConfig *virtconfig.ClusterConfig, spec *v1.VirtualMachineInstanceSpec) {
machineType := clusterConfig.GetMachineType(spec.Architecture)
if machine := spec.Domain.Machine; machine != nil {
if machine.Type == "" {
machine.Type = machineType
}
} else {
spec.Domain.Machine = &v1.Machine{Type: machineType}
}
}
func setDefaultPullPoliciesOnContainerDisks(spec *v1.VirtualMachineInstanceSpec) {
for _, volume := range spec.Volumes {
if volume.ContainerDisk != nil && volume.ContainerDisk.ImagePullPolicy == "" {
if strings.HasSuffix(volume.ContainerDisk.Image, ":latest") || !strings.ContainsAny(volume.ContainerDisk.Image, ":@") {
volume.ContainerDisk.ImagePullPolicy = k8sv1.PullAlways
} else {
volume.ContainerDisk.ImagePullPolicy = k8sv1.PullIfNotPresent
}
}
}
}
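// Illustrative sketch (assumed image names): the rule above resolves to
// PullAlways for mutable references and PullIfNotPresent for pinned ones.
//
//	// "quay.io/containerdisks/fedora:latest"     -> PullAlways (":latest" tag)
//	// "quay.io/containerdisks/fedora"            -> PullAlways (no tag or digest)
//	// "quay.io/containerdisks/fedora:39"         -> PullIfNotPresent (pinned tag)
//	// "quay.io/containerdisks/fedora@sha256:abc" -> PullIfNotPresent (digest)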
func setGuestMemory(spec *v1.VirtualMachineInstanceSpec) {
if spec.Domain.Memory != nil &&
spec.Domain.Memory.Guest != nil {
return
}
if spec.Domain.Memory == nil {
spec.Domain.Memory = &v1.Memory{}
}
switch {
case !spec.Domain.Resources.Requests.Memory().IsZero():
spec.Domain.Memory.Guest = spec.Domain.Resources.Requests.Memory()
case !spec.Domain.Resources.Limits.Memory().IsZero():
spec.Domain.Memory.Guest = spec.Domain.Resources.Limits.Memory()
case spec.Domain.Memory.Hugepages != nil:
if hugepagesSize, err := resource.ParseQuantity(spec.Domain.Memory.Hugepages.PageSize); err == nil {
spec.Domain.Memory.Guest = &hugepagesSize
}
}
}
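// Illustrative sketch (assumed spec): a VMI that only sets a memory request
// gets that quantity mirrored into guest memory.
//
//	spec := &v1.VirtualMachineInstanceSpec{}
//	spec.Domain.Resources.Requests = k8sv1.ResourceList{
//		k8sv1.ResourceMemory: resource.MustParse("2Gi"),
//	}
//	setGuestMemory(spec)
//	// spec.Domain.Memory.Guest now holds 2Gi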
func setDefaultResourceRequests(clusterConfig *virtconfig.ClusterConfig, spec *v1.VirtualMachineInstanceSpec) {
resources := &spec.Domain.Resources
if !resources.Limits.Cpu().IsZero() && resources.Requests.Cpu().IsZero() {
if resources.Requests == nil {
resources.Requests = k8sv1.ResourceList{}
}
resources.Requests[k8sv1.ResourceCPU] = resources.Limits[k8sv1.ResourceCPU]
}
if cpuRequest := clusterConfig.GetCPURequest(); !cpuRequest.Equal(resource.MustParse(virtconfig.DefaultCPURequest)) {
if _, exists := resources.Requests[k8sv1.ResourceCPU]; !exists {
if spec.Domain.CPU != nil && spec.Domain.CPU.DedicatedCPUPlacement {
return
}
if resources.Requests == nil {
resources.Requests = k8sv1.ResourceList{}
}
resources.Requests[k8sv1.ResourceCPU] = *cpuRequest
}
}
}
func SetDefaultGuestCPUTopology(clusterConfig *virtconfig.ClusterConfig, spec *v1.VirtualMachineInstanceSpec) {
cores := uint32(1)
threads := uint32(1)
sockets := uint32(1)
vmiCPU := spec.Domain.CPU
if vmiCPU == nil || (vmiCPU.Cores == 0 && vmiCPU.Sockets == 0 && vmiCPU.Threads == 0) {
// create cpu topology struct
if spec.Domain.CPU == nil {
spec.Domain.CPU = &v1.CPU{}
}
// If cores, sockets and threads are not set, take the value from the domain resource requests or limits
// and assign it to sockets, which gives the best performance (https://bugzilla.redhat.com/show_bug.cgi?id=1653453)
resources := spec.Domain.Resources
if cpuLimit, ok := resources.Limits[k8sv1.ResourceCPU]; ok {
sockets = uint32(cpuLimit.Value())
} else if cpuRequests, ok := resources.Requests[k8sv1.ResourceCPU]; ok {
sockets = uint32(cpuRequests.Value())
}
spec.Domain.CPU.Sockets = sockets
spec.Domain.CPU.Cores = cores
spec.Domain.CPU.Threads = threads
}
}
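// Worked example for SetDefaultGuestCPUTopology: a VMI with no
// spec.domain.cpu and resources.requests.cpu: "4" is defaulted to
// sockets: 4, cores: 1, threads: 1 (the limit wins over the request if
// both are set). A VMI that already sets any of cores, sockets or threads
// is left untouched.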
func setDefaultCPUModel(clusterConfig *virtconfig.ClusterConfig, spec *v1.VirtualMachineInstanceSpec) {
// create cpu topology struct
if spec.Domain.CPU == nil {
spec.Domain.CPU = &v1.CPU{}
}
// if the VMI doesn't have a CPU model set
if spec.Domain.CPU.Model == "" {
if clusterConfigCPUModel := clusterConfig.GetCPUModel(); clusterConfigCPUModel != "" {
// set it as the VMI CPU model
spec.Domain.CPU.Model = clusterConfigCPUModel
} else {
spec.Domain.CPU.Model = v1.DefaultCPUModel
}
}
}
func setDefaultArchitecture(clusterConfig *virtconfig.ClusterConfig, spec *v1.VirtualMachineInstanceSpec) {
if spec.Architecture == "" {
spec.Architecture = clusterConfig.GetDefaultArchitecture()
}
}
func setDefaultArchitectureFromDataSource(clusterConfig *virtconfig.ClusterConfig, vm *v1.VirtualMachine, virtClient kubecli.KubevirtClient) {
const (
dataSourceKind = "datasource"
templateArchLabel = "template.kubevirt.io/architecture"
ignoreFailureErrorFmt = "ignoring failure to find datasource during vm mutation: %v"
ignoreUnknownArchFmt = "ignoring unknown architecture %s provided by DataSource %s in namespace %s"
)
if vm.Spec.Template.Spec.Architecture != "" {
return
}
for _, template := range vm.Spec.DataVolumeTemplates {
if template.Spec.SourceRef == nil || !strings.EqualFold(template.Spec.SourceRef.Kind, dataSourceKind) {
continue
}
namespace := vm.Namespace
templateNamespace := template.Spec.SourceRef.Namespace
if templateNamespace != nil && *templateNamespace != "" {
namespace = *templateNamespace
}
ds, err := virtClient.CdiClient().CdiV1beta1().DataSources(namespace).Get(
context.Background(), template.Spec.SourceRef.Name, metav1.GetOptions{})
if err != nil {
log.Log.Errorf(ignoreFailureErrorFmt, err)
continue
}
if ds.Spec.Source.DataSource != nil {
ds, err = virtClient.CdiClient().CdiV1beta1().DataSources(ds.Spec.Source.DataSource.Namespace).Get(
context.Background(), ds.Spec.Source.DataSource.Name, metav1.GetOptions{})
if err != nil {
log.Log.Errorf(ignoreFailureErrorFmt, err)
continue
}
}
arch, ok := ds.Labels[templateArchLabel]
if !ok {
continue
}
switch arch {
case "amd64", "arm64", "s390x":
vm.Spec.Template.Spec.Architecture = arch
default:
log.Log.Warningf(ignoreUnknownArchFmt, arch, ds.Name, ds.Namespace)
continue
}
return
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package defaults
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
nodelabellerutil "kubevirt.io/kubevirt/pkg/virt-handler/node-labeller/util"
)
var _true = true
func enableFeatureState(fs **v1.FeatureState) {
var val *v1.FeatureState
if *fs != nil {
val = *fs
} else {
val = &v1.FeatureState{}
}
val.Enabled = &_true
*fs = val
}
func isFeatureStateMissing(fs **v1.FeatureState) bool {
return *fs == nil || (*fs).Enabled == nil
}
// TODO: this dupes code in pkg/virt-controller/services/template.go
func isFeatureStateEnabled(fs **v1.FeatureState) bool {
return !isFeatureStateMissing(fs) && *((*fs).Enabled)
}
type HypervFeature struct {
State **v1.FeatureState
Field *k8sfield.Path
Requires *HypervFeature
}
func (hf HypervFeature) isRequirementOK() bool {
if !isFeatureStateEnabled(hf.State) {
return true
}
if hf.Requires == nil {
return true
}
return isFeatureStateEnabled(hf.Requires.State)
}
// A requirement is compatible if:
// 1. it is already enabled (either by the user or by us previously), or
// 2. the user has not set it, so we can enable it on their behalf.
func (hf HypervFeature) TryToSetRequirement() error {
if !isFeatureStateEnabled(hf.State) || hf.Requires == nil {
// not enabled or no requirements: nothing to do
return nil
}
if isFeatureStateMissing(hf.Requires.State) {
enableFeatureState(hf.Requires.State)
return nil
}
if isFeatureStateEnabled(hf.Requires.State) {
return nil
}
return fmt.Errorf("%s", hf.String())
}
func (hf HypervFeature) IsRequirementFulfilled() (metav1.StatusCause, bool) {
if !hf.isRequirementOK() {
return metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: hf.String(),
Field: hf.Field.String(),
}, false
}
return metav1.StatusCause{}, true
}
func (hf HypervFeature) String() string {
if hf.Requires == nil {
return fmt.Sprintf("'%s' is missing", hf.Field.String())
}
return fmt.Sprintf("'%s' requires '%s', which was disabled.", hf.Field.String(), hf.Requires.Field.String())
}
func getHypervFeatureDependencies(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []HypervFeature {
if spec.Domain.Features == nil || spec.Domain.Features.Hyperv == nil {
return []HypervFeature{}
}
hyperv := spec.Domain.Features.Hyperv // shortcut
hypervField := field.Child("domain", "features", "hyperv") // shortcut
vpindex := HypervFeature{
State: &hyperv.VPIndex,
Field: hypervField.Child("vpindex"),
}
synic := HypervFeature{
State: &hyperv.SyNIC,
Field: hypervField.Child("synic"),
Requires: &vpindex,
}
vapic := HypervFeature{
State: &hyperv.VAPIC,
Field: hypervField.Child("vapic"),
}
syNICTimer := &v1.FeatureState{}
if hyperv.SyNICTimer != nil {
syNICTimer.Enabled = hyperv.SyNICTimer.Enabled
}
features := []HypervFeature{
// keep in REVERSE order: leaves first.
{
State: &hyperv.EVMCS,
Field: hypervField.Child("evmcs"),
Requires: &vapic,
},
{
State: &hyperv.IPI,
Field: hypervField.Child("ipi"),
Requires: &vpindex,
},
{
State: &hyperv.TLBFlush,
Field: hypervField.Child("tlbflush"),
Requires: &vpindex,
},
{
State: &syNICTimer,
Field: hypervField.Child("synictimer"),
Requires: &synic,
},
synic,
}
return features
}
func SetHypervFeatureDependencies(spec *v1.VirtualMachineInstanceSpec) error {
path := k8sfield.NewPath("spec")
if features := getHypervFeatureDependencies(path, spec); features != nil {
for _, feat := range features {
if err := feat.TryToSetRequirement(); err != nil {
return err
}
}
}
// Check if the VMI has the EVMCS feature enabled. If yes, we have to add the vmx CPU feature.
if spec.Domain.Features != nil && spec.Domain.Features.Hyperv != nil && spec.Domain.Features.Hyperv.EVMCS != nil &&
(spec.Domain.Features.Hyperv.EVMCS.Enabled == nil || (*spec.Domain.Features.Hyperv.EVMCS.Enabled)) {
setEVMCSDependency(spec)
}
return nil
}
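// Example of the dependency resolution in SetHypervFeatureDependencies (a
// sketch): enabling only hyperv.synictimer pulls in its whole requirement
// chain. Because the feature list is ordered leaves-first, a single pass
// is enough:
//
//	synictimer (enabled) -> requires synic   -> synic gets enabled
//	synic      (enabled) -> requires vpindex -> vpindex gets enabled
//
// Had the user explicitly disabled synic instead, TryToSetRequirement
// would return an error rather than silently overriding that choice.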
func setEVMCSDependency(spec *v1.VirtualMachineInstanceSpec) {
vmxFeature := v1.CPUFeature{
Name: nodelabellerutil.VmxFeature,
Policy: nodelabellerutil.RequirePolicy,
}
cpuFeatures := []v1.CPUFeature{
vmxFeature,
}
if spec.Domain.CPU == nil {
spec.Domain.CPU = &v1.CPU{
Features: cpuFeatures,
}
return
}
if len(spec.Domain.CPU.Features) == 0 {
spec.Domain.CPU.Features = cpuFeatures
return
}
for _, requiredFeature := range cpuFeatures {
featureFound := false
for i, existingFeature := range spec.Domain.CPU.Features {
if existingFeature.Name == requiredFeature.Name {
featureFound = true
if existingFeature.Policy != requiredFeature.Policy {
spec.Domain.CPU.Features[i].Policy = requiredFeature.Policy
}
break
}
}
if !featureFound {
spec.Domain.CPU.Features = append(spec.Domain.CPU.Features, requiredFeature)
}
}
}
/*
 * This file is part of the KubeVirt project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package defaults
import (
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/pointer"
)
func setDefaultS390xDisksBus(spec *v1.VirtualMachineInstanceSpec) {
bus := v1.DiskBusVirtio
for i := range spec.Domain.Devices.Disks {
disk := &spec.Domain.Devices.Disks[i].DiskDevice
if disk.Disk != nil && disk.Disk.Bus == "" {
disk.Disk.Bus = bus
}
if disk.CDRom != nil && disk.CDRom.Bus == "" {
disk.CDRom.Bus = bus
}
if disk.LUN != nil && disk.LUN.Bus == "" {
disk.LUN.Bus = bus
}
}
}
// Disable the ACPI feature by default on s390x, since it is not supported
func setS390xDefaultFeatures(spec *v1.VirtualMachineInstanceSpec) {
featureStateDisabled := v1.FeatureState{Enabled: pointer.P[bool](false)}
if spec.Domain.Features == nil {
spec.Domain.Features = &v1.Features{
ACPI: featureStateDisabled,
}
} else if spec.Domain.Features.ACPI.Enabled == nil {
spec.Domain.Features.ACPI.Enabled = pointer.P[bool](false)
}
}
// SetS390xDefaults is a mutating function for the mutating webhook; it applies s390x-specific defaults.
func SetS390xDefaults(spec *v1.VirtualMachineInstanceSpec) {
setDefaultS390xDisksBus(spec)
SetS390xWatchdog(spec)
}
func IsS390X(vmiSpec *v1.VirtualMachineInstanceSpec) bool {
return vmiSpec.Architecture == "s390x"
}
func SetS390xWatchdog(spec *v1.VirtualMachineInstanceSpec) {
if spec.Domain.Devices.Watchdog != nil {
if spec.Domain.Devices.Watchdog.Diag288 == nil {
spec.Domain.Devices.Watchdog.Diag288 = &v1.Diag288Watchdog{}
}
if spec.Domain.Devices.Watchdog.Diag288.Action == "" {
spec.Domain.Devices.Watchdog.Diag288.Action = v1.WatchdogActionReset
}
}
}
package downwardmetrics
import (
"path/filepath"
"strconv"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/config"
"kubevirt.io/kubevirt/pkg/downwardmetrics/vhostmd"
"kubevirt.io/kubevirt/pkg/util"
)
const (
DownwardMetricsSerialDeviceName = "org.github.vhostmd.1"
DownwardMetricsChannelDir = util.VirtPrivateDir + "/downwardmetrics-channel"
DownwardMetricsChannelSocket = DownwardMetricsChannelDir + "/downwardmetrics.sock"
)
func CreateDownwardMetricDisk(vmi *v1.VirtualMachineInstance) error {
for _, volume := range vmi.Spec.Volumes {
if volume.DownwardMetrics != nil {
return vhostmd.NewMetricsIODisk(config.DownwardMetricDisk).Create()
}
}
return nil
}
func FormatDownwardMetricPath(pid int) string {
return filepath.Join("/proc", strconv.Itoa(pid), "/root", config.DownwardMetricDisk)
}
func HasDownwardMetricDisk(vmi *v1.VirtualMachineInstance) bool {
for _, volume := range vmi.Spec.Volumes {
if volume.DownwardMetrics != nil {
return true
}
}
return false
}
func HasDevice(spec *v1.VirtualMachineInstanceSpec) bool {
return spec.Domain.Devices.DownwardMetrics != nil
}
func ChannelSocketPathOnHost(pid int) string {
return filepath.Join("/proc", strconv.Itoa(pid), "root", DownwardMetricsChannelSocket)
}
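// Both path helpers resolve guest-visible paths through the virt-launcher
// pod's /proc/<pid>/root, e.g. FormatDownwardMetricPath(1234) yields
// "/proc/1234/root" joined with config.DownwardMetricDisk.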
package vhostmd
import (
"encoding/binary"
"encoding/xml"
"fmt"
"io"
"os"
"strings"
"kubevirt.io/kubevirt/pkg/downwardmetrics/vhostmd/api"
"kubevirt.io/kubevirt/pkg/util"
)
const fileSize = 262144
const maxBodyLength = fileSize - 24
var signature = [4]byte{'m', 'v', 'b', 'd'}
type vhostmd struct {
filePath string
}
type Header struct {
Signature [4]byte
Flag int32
Checksum int32
Length int32
}
type Disk struct {
Header *Header
Raw []byte
}
func (d *Disk) String() string {
return fmt.Sprintf("%v:%v:%v:%v", string(d.Header.Signature[:]), d.Header.Flag, d.Header.Checksum, d.Header.Length)
}
func (d *Disk) Verify() error {
var checksum int32
for _, b := range d.Raw {
checksum = checksum + int32(b)
}
if d.Header.Flag > 0 {
return fmt.Errorf("file is locked")
}
if checksum != d.Header.Checksum {
return fmt.Errorf("checksum is %v, but expected %v", checksum, d.Header.Checksum)
}
return nil
}
func (d *Disk) Metrics() (*api.Metrics, error) {
m := &api.Metrics{}
if err := xml.Unmarshal(d.Raw, m); err != nil {
return nil, err
}
m.Text = strings.TrimSpace(m.Text)
for i, metric := range m.Metrics {
m.Metrics[i].Name = strings.TrimSpace(metric.Name)
m.Metrics[i].Type = api.MetricType(strings.TrimSpace(string(metric.Type)))
m.Metrics[i].Context = api.MetricContext(strings.TrimSpace(string(metric.Context)))
m.Metrics[i].Value = strings.TrimSpace(metric.Value)
m.Metrics[i].Text = strings.TrimSpace(metric.Text)
}
return m, nil
}
func (v *vhostmd) Create() error {
return createDisk(v.filePath)
}
func (v *vhostmd) Read() (*api.Metrics, error) {
disk, err := readDisk(v.filePath)
if err != nil {
return nil, fmt.Errorf("failed to load vhostmd file: %v", err)
}
if err := disk.Verify(); err != nil {
return nil, fmt.Errorf("failed to verify vhostmd file: %v", err)
}
return disk.Metrics()
}
func (v *vhostmd) Write(metrics *api.Metrics) (err error) {
f, err := os.OpenFile(v.filePath, os.O_RDWR, 0)
if err != nil {
return fmt.Errorf("failed to open vhostmd disk: %v", err)
}
defer func() {
if fileErr := f.Close(); fileErr != nil && err == nil {
err = fileErr
}
}()
if err := writeDisk(f, metrics); err != nil {
return fmt.Errorf("failed to write metrics: %v", err)
}
return nil
}
func readDisk(filePath string) (*Disk, error) {
f, err := os.Open(filePath)
if err != nil {
return nil, err
}
// If the read operation succeeds, but close fails, we have already read the data,
// so it is ok to not return the error.
defer util.CloseIOAndCheckErr(f, nil)
d := &Disk{
Header: &Header{},
}
if err = binary.Read(f, binary.BigEndian, d.Header); err != nil {
return nil, err
}
if d.Header.Flag == 0 {
if d.Header.Length < 0 || d.Header.Length > maxBodyLength {
return nil, fmt.Errorf("invalid metrics file: expected a body length between 0 and %v, got %v", maxBodyLength, d.Header.Length)
}
d.Raw = make([]byte, d.Header.Length)
if _, err = io.ReadFull(f, d.Raw); err != nil {
return nil, err
}
}
return d, err
}
func createDisk(filePath string) (err error) {
var f *os.File
if f, err = os.OpenFile(filePath, os.O_CREATE|os.O_RDWR, 0755); err != nil {
return fmt.Errorf("failed getting vhostmd disk filestats: %v", err)
}
defer func() {
if fileErr := f.Close(); fileErr != nil && err == nil {
err = fileErr
}
}()
_, err = f.Seek(fileSize-1, 0)
if err != nil {
return fmt.Errorf("preallocating vhostmd disk failed: %v", err)
}
_, err = f.Write([]byte{0})
if err != nil {
return fmt.Errorf("preallocating vhostmd disk failed: %v", err)
}
_, err = f.Seek(0, 0)
if err != nil {
return fmt.Errorf("moving back to file start failed: %v", err)
}
return writeDisk(f, &api.Metrics{})
}
func writeDisk(file *os.File, m *api.Metrics) (err error) {
d := emptyLockedDisk()
if d.Raw, err = xml.MarshalIndent(m, "", " "); err != nil {
return fmt.Errorf("failed to encode metrics: %v", err)
}
// Add a trailing newline, since `vm-dump-metrics` does not append one when writing metrics
d.Raw = append(d.Raw, '\n')
if len(d.Raw) > maxBodyLength {
return fmt.Errorf("vhostmd metrics body is too big, expected a maximum of %v, got %v", maxBodyLength, len(d.Raw))
}
var checksum int32
for _, b := range d.Raw {
checksum = checksum + int32(b)
}
d.Header.Checksum = checksum
d.Header.Length = int32(len(d.Raw))
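// Write the header with Flag=1 first and flush it, so a concurrent reader
// sees the file as locked (Verify rejects Flag > 0) while the body is
// being replaced.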
if err = binary.Write(file, binary.BigEndian, d.Header); err != nil {
return fmt.Errorf("failed to write vhostmd header: %v", err)
}
if err = file.Sync(); err != nil {
return fmt.Errorf("failed to flush to vhostmd file, when trying to lock it: %v", err)
}
if _, err = file.Write(d.Raw); err != nil {
return fmt.Errorf("failed to write vhostmd body: %v", err)
}
_, err = file.Seek(0, 0)
if err != nil {
return fmt.Errorf("moving back to file start failed: %v", err)
}
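// The body is fully written: clear the lock flag and rewrite the header to
// make the new metrics visible to readers.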
d.Header.Flag = 0
if err = binary.Write(file, binary.BigEndian, d.Header); err != nil {
return fmt.Errorf("failed to unlock vhostmd file: %v", err)
}
return nil
}
func emptyLockedDisk() *Disk {
return &Disk{
Header: &Header{
Signature: signature,
Flag: 1,
Checksum: 0,
Length: 0,
},
}
}
package vhostmd
import "kubevirt.io/kubevirt/pkg/downwardmetrics/vhostmd/api"
type MetricsIO interface {
Create() error
Read() (*api.Metrics, error)
Write(metrics *api.Metrics) error
}
func NewMetricsIODisk(filePath string) *vhostmd {
return &vhostmd{filePath: filePath}
}
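// Minimal usage sketch (illustrative; the file path is an assumption):
//
//	m := NewMetricsIODisk("/var/run/kubevirt/metrics-disk")
//	if err := m.Create(); err != nil { /* handle */ }
//	if err := m.Write(&api.Metrics{}); err != nil { /* handle */ }
//	metrics, err := m.Read()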
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitter
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
drautil "kubevirt.io/kubevirt/pkg/dra"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
type Validator struct {
field *k8sfield.Path
vmiSpec *v1.VirtualMachineInstanceSpec
configChecker GPUDRAConfigChecker
}
type GPUDRAConfigChecker interface {
GPUsWithDRAGateEnabled() bool
}
func NewValidator(field *k8sfield.Path, vmiSpec *v1.VirtualMachineInstanceSpec, configChecker GPUDRAConfigChecker) *Validator {
return &Validator{
field: field,
vmiSpec: vmiSpec,
configChecker: configChecker,
}
}
func (v Validator) ValidateCreation() []metav1.StatusCause {
var causes []metav1.StatusCause
causes = append(causes, validateCreationDRA(v.field, v.vmiSpec, v.configChecker)...)
return causes
}
func (v Validator) Validate() []metav1.StatusCause {
return validateCreationDRA(v.field, v.vmiSpec, v.configChecker)
}
func validateCreationDRA(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, checker GPUDRAConfigChecker) []metav1.StatusCause {
var (
causes []metav1.StatusCause
draGPUs []v1.GPU
nonDRAGPUs []v1.GPU
)
for _, gpu := range spec.Domain.Devices.GPUs {
if drautil.IsGPUDRA(gpu) {
draGPUs = append(draGPUs, gpu)
} else {
nonDRAGPUs = append(nonDRAGPUs, gpu)
}
}
// If a GPU is non-DRA, it must have only deviceName configured
for _, gpu := range nonDRAGPUs {
if gpu.DeviceName == "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "vmi.spec.domain.devices.gpus contains GPUs without deviceName",
Field: field.Child("spec", "domain", "devices", "gpus").String(),
})
return causes
}
if gpu.DeviceName != "" && gpu.ClaimRequest != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "vmi.spec.domain.devices.gpus contains GPUs with both deviceName and claimRequest",
Field: field.Child("spec", "domain", "devices", "gpus").String(),
})
}
}
if len(draGPUs) > 0 && !checker.GPUsWithDRAGateEnabled() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "vmi.spec.domain.devices.gpus contains DRA enabled GPUs but feature gate is not enabled",
Field: field.Child("spec", "domain", "devices", "gpus").String(),
})
return causes
}
claimNamesFromGPUs := sets.New[string]()
for _, gpu := range draGPUs {
claimNamesFromGPUs.Insert(*gpu.ClaimName)
}
claimNamesFromRC := sets.New[string]()
for _, rc := range spec.ResourceClaims {
claimNamesFromRC.Insert(rc.Name)
}
if !claimNamesFromRC.IsSuperset(claimNamesFromGPUs) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "vmi.spec.resourceClaims must specify all claims used in vmi.spec.domain.devices.gpus",
Field: field.Child("resourceClaims").String(),
})
return causes
}
return causes
}
func ValidateCreation(field *k8sfield.Path, vmiSpec *v1.VirtualMachineInstanceSpec, clusterCfg *virtconfig.ClusterConfig) []metav1.StatusCause {
return NewValidator(field, vmiSpec, clusterCfg).ValidateCreation()
}
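// Illustrative spec fragment that passes validateCreationDRA (names are
// placeholders): a DRA GPU leaves deviceName empty and sets a claim
// request, and every claim name referenced by a DRA GPU must appear in
// spec.resourceClaims:
//
//	domain.devices.gpus:
//	  - name: gpu0
//	    claimRequest: {...}   # DRA GPU; needs the gate checked by GPUsWithDRAGateEnabled
//	resourceClaims:
//	  - name: <claim referenced by gpu0>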
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package dra
import v1 "kubevirt.io/api/core/v1"
// IsAllDRAGPUsReconciled checks if all GPUs with DRA in the VMI spec have corresponding status entries populated
// with either a PCI address (pGPU) or an mdev UUID (vGPU). It is used by both virt-handler and virt-controller
// to decide whether GPU-related DRA reconciliation is complete.
func IsAllDRAGPUsReconciled(vmi *v1.VirtualMachineInstance, status *v1.DeviceStatus) bool {
draGPUNames := make(map[string]struct{})
for _, gpu := range vmi.Spec.Domain.Devices.GPUs {
if gpu.ClaimRequest != nil {
draGPUNames[gpu.Name] = struct{}{}
}
}
if len(draGPUNames) == 0 {
return true
}
reconciledCount := 0
if status != nil {
for _, gpuStatus := range status.GPUStatuses {
if _, isDRAGPU := draGPUNames[gpuStatus.Name]; !isDRAGPU {
continue
}
if gpuStatus.DeviceResourceClaimStatus != nil &&
gpuStatus.DeviceResourceClaimStatus.ResourceClaimName != nil &&
gpuStatus.DeviceResourceClaimStatus.Name != nil &&
gpuStatus.DeviceResourceClaimStatus.Attributes != nil &&
(gpuStatus.DeviceResourceClaimStatus.Attributes.PCIAddress != nil ||
gpuStatus.DeviceResourceClaimStatus.Attributes.MDevUUID != nil) {
reconciledCount++
}
}
}
return reconciledCount == len(draGPUNames)
}
// IsAllDRAHostDevicesReconciled checks if all HostDevices with DRA in the VMI spec have corresponding status entries populated
// with either a PCI address (e.g., SR-IOV) or an mdev UUID when mediated devices are used. It mirrors the semantics of
// IsAllDRAGPUsReconciled but operates on spec.domain.devices.hostDevices instead of GPUs.
func IsAllDRAHostDevicesReconciled(vmi *v1.VirtualMachineInstance, status *v1.DeviceStatus) bool {
draHostDeviceNames := make(map[string]struct{})
for _, hd := range vmi.Spec.Domain.Devices.HostDevices {
if hd.ClaimRequest != nil {
draHostDeviceNames[hd.Name] = struct{}{}
}
}
if len(draHostDeviceNames) == 0 {
return true
}
reconciledCount := 0
if status != nil {
for _, hdStatus := range status.HostDeviceStatuses {
if _, isDRAHostDev := draHostDeviceNames[hdStatus.Name]; !isDRAHostDev {
continue
}
if hdStatus.DeviceResourceClaimStatus != nil &&
hdStatus.DeviceResourceClaimStatus.ResourceClaimName != nil &&
hdStatus.DeviceResourceClaimStatus.Name != nil &&
hdStatus.DeviceResourceClaimStatus.Attributes != nil &&
(hdStatus.DeviceResourceClaimStatus.Attributes.PCIAddress != nil ||
hdStatus.DeviceResourceClaimStatus.Attributes.MDevUUID != nil) {
reconciledCount++
}
}
}
return reconciledCount == len(draHostDeviceNames)
}
// IsGPUDRA returns true if the GPU is a DRA GPU
func IsGPUDRA(gpu v1.GPU) bool {
return gpu.DeviceName == "" && gpu.ClaimRequest != nil
}
// IsHostDeviceDRA returns true if the HostDevice is a DRA HostDevice
func IsHostDeviceDRA(hd v1.HostDevice) bool {
return hd.DeviceName == "" && hd.ClaimRequest != nil
}
// Code generated by MockGen. DO NOT EDIT.
// Source: utils.go
//
// Generated by this command:
//
// mockgen -source utils.go -package=ephemeraldiskutils -destination=generated_mock_utils.go
//
// Package ephemeraldiskutils is a generated GoMock package.
package ephemeraldiskutils
import (
reflect "reflect"
gomock "go.uber.org/mock/gomock"
safepath "kubevirt.io/kubevirt/pkg/safepath"
)
// MockOwnershipManagerInterface is a mock of OwnershipManagerInterface interface.
type MockOwnershipManagerInterface struct {
ctrl *gomock.Controller
recorder *MockOwnershipManagerInterfaceMockRecorder
isgomock struct{}
}
// MockOwnershipManagerInterfaceMockRecorder is the mock recorder for MockOwnershipManagerInterface.
type MockOwnershipManagerInterfaceMockRecorder struct {
mock *MockOwnershipManagerInterface
}
// NewMockOwnershipManagerInterface creates a new mock instance.
func NewMockOwnershipManagerInterface(ctrl *gomock.Controller) *MockOwnershipManagerInterface {
mock := &MockOwnershipManagerInterface{ctrl: ctrl}
mock.recorder = &MockOwnershipManagerInterfaceMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockOwnershipManagerInterface) EXPECT() *MockOwnershipManagerInterfaceMockRecorder {
return m.recorder
}
// SetFileOwnership mocks base method.
func (m *MockOwnershipManagerInterface) SetFileOwnership(file *safepath.Path) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetFileOwnership", file)
ret0, _ := ret[0].(error)
return ret0
}
// SetFileOwnership indicates an expected call of SetFileOwnership.
func (mr *MockOwnershipManagerInterfaceMockRecorder) SetFileOwnership(file any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFileOwnership", reflect.TypeOf((*MockOwnershipManagerInterface)(nil).SetFileOwnership), file)
}
// UnsafeSetFileOwnership mocks base method.
func (m *MockOwnershipManagerInterface) UnsafeSetFileOwnership(file string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UnsafeSetFileOwnership", file)
ret0, _ := ret[0].(error)
return ret0
}
// UnsafeSetFileOwnership indicates an expected call of UnsafeSetFileOwnership.
func (mr *MockOwnershipManagerInterfaceMockRecorder) UnsafeSetFileOwnership(file any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnsafeSetFileOwnership", reflect.TypeOf((*MockOwnershipManagerInterface)(nil).UnsafeSetFileOwnership), file)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package ephemeraldiskutils
//go:generate mockgen -source $GOFILE -package=$GOPACKAGE -destination=generated_mock_$GOFILE
import (
"errors"
"fmt"
"os"
"os/user"
"strconv"
"syscall"
"kubevirt.io/kubevirt/pkg/safepath"
"kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
)
// TODO this should be part of a struct, instead of a global
var DefaultOwnershipManager OwnershipManagerInterface = &OwnershipManager{user: "qemu"}
// For testing
func MockDefaultOwnershipManager() {
DefaultOwnershipManager = &nonOpManager{}
}
type nonOpManager struct {
}
func (no *nonOpManager) UnsafeSetFileOwnership(_ string) error {
return nil
}
func (no *nonOpManager) SetFileOwnership(_ *safepath.Path) error {
return nil
}
func MockDefaultOwnershipManagerWithFailure() {
DefaultOwnershipManager = &failureManager{}
}
type failureManager struct {
}
func (no *failureManager) UnsafeSetFileOwnership(_ string) error {
panic("unexpected call to UnsafeSetFileOwnership")
}
func (no *failureManager) SetFileOwnership(_ *safepath.Path) error {
panic("unexpected call to SetFileOwnership")
}
type OwnershipManager struct {
user string
}
func (om *OwnershipManager) SetFileOwnership(file *safepath.Path) error {
fd, err := safepath.OpenAtNoFollow(file)
if err != nil {
return err
}
defer fd.Close()
return om.UnsafeSetFileOwnership(fd.SafePath())
}
func (om *OwnershipManager) UnsafeSetFileOwnership(file string) error {
owner, err := user.Lookup(om.user)
if err != nil {
return fmt.Errorf("failed to look up user %s: %v", om.user, err)
}
uid, err := strconv.Atoi(owner.Uid)
if err != nil {
return fmt.Errorf("failed to convert UID %s of user %s: %v", owner.Uid, om.user, err)
}
gid, err := strconv.Atoi(owner.Gid)
if err != nil {
return fmt.Errorf("failed to convert GID %s of user %s: %v", owner.Gid, om.user, err)
}
fileInfo, err := os.Stat(file)
if err != nil {
return err
}
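// Skip the chown below when the file already has the desired owner; this
// keeps repeated invocations (e.g. on retries) cheap and idempotent.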
if stat, ok := fileInfo.Sys().(*syscall.Stat_t); ok {
if uid == int(stat.Uid) && gid == int(stat.Gid) {
return nil
}
} else {
return fmt.Errorf("failed to convert stat info")
}
return os.Chown(file, uid, gid)
}
func RemoveFilesIfExist(paths ...string) error {
var err error
for _, path := range paths {
err = os.Remove(path)
if err != nil && !errors.Is(err, os.ErrNotExist) {
return err
}
}
return nil
}
func FileExists(path string) (bool, error) {
_, err := os.Stat(path)
exists := false
if err == nil {
exists = true
} else if errors.Is(err, os.ErrNotExist) {
err = nil
}
return exists, err
}
type OwnershipManagerInterface interface {
// Deprecated: UnsafeSetFileOwnership should not be used. Use SetFileOwnership instead.
UnsafeSetFileOwnership(file string) error
SetFileOwnership(file *safepath.Path) error
}
func GetEphemeralBackingSourceBlockDevices(domain *api.Domain) map[string]bool {
isDevEphemeralBackingSource := make(map[string]bool)
for _, disk := range domain.Spec.Devices.Disks {
if disk.BackingStore != nil && disk.BackingStore.Source != nil {
if disk.BackingStore.Type == "block" && disk.BackingStore.Source.Dev != "" && disk.BackingStore.Source.Name != "" {
isDevEphemeralBackingSource[disk.BackingStore.Source.Name] = true
}
}
}
return isDevEphemeralBackingSource
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package ephemeraldisk
import (
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
v1 "kubevirt.io/api/core/v1"
diskutils "kubevirt.io/kubevirt/pkg/ephemeral-disk-utils"
"kubevirt.io/kubevirt/pkg/util"
"kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
)
const (
ephemeralDiskPVCBaseDir = "/var/run/kubevirt-private/vmi-disks"
ephemeralDiskBlockDeviceBaseDir = "/dev"
ephemeralDiskFormat = "raw"
)
type EphemeralDiskCreatorInterface interface {
CreateBackedImageForVolume(volume v1.Volume, backingFile string, backingFormat string) error
CreateEphemeralImages(vmi *v1.VirtualMachineInstance, domain *api.Domain) error
GetFilePath(volumeName string) string
Init() error
}
type ephemeralDiskCreator struct {
mountBaseDir string
pvcBaseDir string
blockDevBaseDir string
discCreateFunc func(backingFile string, backingFormat string, imagePath string) ([]byte, error)
}
func NewEphemeralDiskCreator(mountBaseDir string) *ephemeralDiskCreator {
return &ephemeralDiskCreator{
mountBaseDir: mountBaseDir,
pvcBaseDir: ephemeralDiskPVCBaseDir,
blockDevBaseDir: ephemeralDiskBlockDeviceBaseDir,
discCreateFunc: createBackingDisk,
}
}
func (c *ephemeralDiskCreator) Init() error {
return os.MkdirAll(c.mountBaseDir, 0755)
}
func (c *ephemeralDiskCreator) generateVolumeMountDir(volumeName string) string {
return filepath.Join(c.mountBaseDir, volumeName)
}
func (c *ephemeralDiskCreator) getBackingFilePath(volumeName string, isBlockVolume bool) string {
if isBlockVolume {
return filepath.Join(c.blockDevBaseDir, volumeName)
}
return filepath.Join(c.pvcBaseDir, volumeName, "disk.img")
}
func (c *ephemeralDiskCreator) createVolumeDirectory(volumeName string) error {
dir := c.generateVolumeMountDir(volumeName)
err := util.MkdirAllWithNosec(dir)
if err != nil {
return err
}
return nil
}
func (c *ephemeralDiskCreator) GetFilePath(volumeName string) string {
volumeMountDir := c.generateVolumeMountDir(volumeName)
return filepath.Join(volumeMountDir, "disk.qcow2")
}
func (c *ephemeralDiskCreator) CreateBackedImageForVolume(volume v1.Volume, backingFile string, backingFormat string) error {
err := c.createVolumeDirectory(volume.Name)
if err != nil {
return err
}
imagePath := c.GetFilePath(volume.Name)
if _, err := os.Stat(imagePath); err == nil {
return nil
} else if !errors.Is(err, os.ErrNotExist) {
return err
}
output, err := c.discCreateFunc(backingFile, backingFormat, imagePath)
// Cleanup of previous images isn't really necessary as they're all on EmptyDir.
if err != nil {
return fmt.Errorf("qemu-img failed with output '%s': %v", string(output), err)
}
// #nosec G302: Poor file permissions used with chmod. Safe permission setting for files shared between virt-launcher and qemu.
if err = os.Chmod(imagePath, 0640); err != nil {
return fmt.Errorf("failed to change permissions on %s", imagePath)
}
// We need to ensure that the file ownership is set up correctly.
return diskutils.DefaultOwnershipManager.UnsafeSetFileOwnership(imagePath)
}
func (c *ephemeralDiskCreator) CreateEphemeralImages(vmi *v1.VirtualMachineInstance, domain *api.Domain) error {
// The domain is setup to use the COW image instead of the base image. What we have
// to do here is only create the image where the domain expects it (GetFilePath)
// for each disk that requires it.
isBlockVolumes := diskutils.GetEphemeralBackingSourceBlockDevices(domain)
for _, volume := range vmi.Spec.Volumes {
if volume.VolumeSource.Ephemeral != nil {
if err := c.CreateBackedImageForVolume(volume, c.getBackingFilePath(volume.Name, isBlockVolumes[volume.Name]), ephemeralDiskFormat); err != nil {
return err
}
}
}
return nil
}
func createBackingDisk(backingFile string, backingFormat string, imagePath string) ([]byte, error) {
// #nosec No risk for attacker injection. Parameters are predefined strings
cmd := exec.Command("qemu-img",
"create",
"-f",
"qcow2",
"-b",
backingFile,
"-F",
backingFormat,
imagePath,
)
return cmd.CombinedOutput()
}
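// The invocation above is equivalent to:
//
//	qemu-img create -f qcow2 -b <backingFile> -F <backingFormat> <imagePath>
//
// i.e. it creates a qcow2 copy-on-write overlay on top of the read-only
// backing image.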
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: pkg/handler-launcher-com/cmd/v1/cmd.proto
/*
Package v1 is a generated protocol buffer package.
It is generated from these files:
pkg/handler-launcher-com/cmd/v1/cmd.proto
It has these top-level messages:
QemuVersionResponse
VMI
CPU
Sibling
Pages
Memory
Cell
Topology
SMBios
DiskInfo
ClusterConfig
InterfaceBindingMigration
VirtualMachineOptions
VMIRequest
MigrationRequest
ExecRequest
EmptyRequest
Response
DomainResponse
DomainStatsResponse
GuestInfoResponse
GuestUserListResponse
GuestFilesystemsResponse
ExecResponse
GuestPingRequest
GuestPingResponse
FreezeRequest
MemoryDumpRequest
SEVInfoResponse
LaunchMeasurementResponse
InjectLaunchSecretRequest
DirtyRateStatsResponse
ScreenshotResponse
BackupRequest
*/
package v1
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type QemuVersionResponse struct {
Response *Response `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
}
func (m *QemuVersionResponse) Reset() { *m = QemuVersionResponse{} }
func (m *QemuVersionResponse) String() string { return proto.CompactTextString(m) }
func (*QemuVersionResponse) ProtoMessage() {}
func (*QemuVersionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *QemuVersionResponse) GetResponse() *Response {
if m != nil {
return m.Response
}
return nil
}
func (m *QemuVersionResponse) GetVersion() string {
if m != nil {
return m.Version
}
return ""
}
type VMI struct {
VmiJson []byte `protobuf:"bytes,1,opt,name=vmiJson,proto3" json:"vmiJson,omitempty"`
}
func (m *VMI) Reset() { *m = VMI{} }
func (m *VMI) String() string { return proto.CompactTextString(m) }
func (*VMI) ProtoMessage() {}
func (*VMI) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *VMI) GetVmiJson() []byte {
if m != nil {
return m.VmiJson
}
return nil
}
type CPU struct {
Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
Siblings []uint32 `protobuf:"varint,2,rep,packed,name=siblings" json:"siblings,omitempty"`
}
func (m *CPU) Reset() { *m = CPU{} }
func (m *CPU) String() string { return proto.CompactTextString(m) }
func (*CPU) ProtoMessage() {}
func (*CPU) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *CPU) GetId() uint32 {
if m != nil {
return m.Id
}
return 0
}
func (m *CPU) GetSiblings() []uint32 {
if m != nil {
return m.Siblings
}
return nil
}
type Sibling struct {
Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
Value uint64 `protobuf:"varint,2,opt,name=value" json:"value,omitempty"`
}
func (m *Sibling) Reset() { *m = Sibling{} }
func (m *Sibling) String() string { return proto.CompactTextString(m) }
func (*Sibling) ProtoMessage() {}
func (*Sibling) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *Sibling) GetId() uint32 {
if m != nil {
return m.Id
}
return 0
}
func (m *Sibling) GetValue() uint64 {
if m != nil {
return m.Value
}
return 0
}
type Pages struct {
Count uint64 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"`
Unit string `protobuf:"bytes,2,opt,name=unit" json:"unit,omitempty"`
Size uint32 `protobuf:"varint,3,opt,name=size" json:"size,omitempty"`
}
func (m *Pages) Reset() { *m = Pages{} }
func (m *Pages) String() string { return proto.CompactTextString(m) }
func (*Pages) ProtoMessage() {}
func (*Pages) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *Pages) GetCount() uint64 {
if m != nil {
return m.Count
}
return 0
}
func (m *Pages) GetUnit() string {
if m != nil {
return m.Unit
}
return ""
}
func (m *Pages) GetSize() uint32 {
if m != nil {
return m.Size
}
return 0
}
type Memory struct {
Amount uint64 `protobuf:"varint,1,opt,name=amount" json:"amount,omitempty"`
Unit string `protobuf:"bytes,2,opt,name=unit" json:"unit,omitempty"`
}
func (m *Memory) Reset() { *m = Memory{} }
func (m *Memory) String() string { return proto.CompactTextString(m) }
func (*Memory) ProtoMessage() {}
func (*Memory) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *Memory) GetAmount() uint64 {
if m != nil {
return m.Amount
}
return 0
}
func (m *Memory) GetUnit() string {
if m != nil {
return m.Unit
}
return ""
}
type Cell struct {
Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
Memory *Memory `protobuf:"bytes,2,opt,name=memory" json:"memory,omitempty"`
Pages []*Pages `protobuf:"bytes,3,rep,name=pages" json:"pages,omitempty"`
Distances []*Sibling `protobuf:"bytes,4,rep,name=distances" json:"distances,omitempty"`
Cpus []*CPU `protobuf:"bytes,5,rep,name=cpus" json:"cpus,omitempty"`
}
func (m *Cell) Reset() { *m = Cell{} }
func (m *Cell) String() string { return proto.CompactTextString(m) }
func (*Cell) ProtoMessage() {}
func (*Cell) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *Cell) GetId() uint32 {
if m != nil {
return m.Id
}
return 0
}
func (m *Cell) GetMemory() *Memory {
if m != nil {
return m.Memory
}
return nil
}
func (m *Cell) GetPages() []*Pages {
if m != nil {
return m.Pages
}
return nil
}
func (m *Cell) GetDistances() []*Sibling {
if m != nil {
return m.Distances
}
return nil
}
func (m *Cell) GetCpus() []*CPU {
if m != nil {
return m.Cpus
}
return nil
}
type Topology struct {
NumaCells []*Cell `protobuf:"bytes,1,rep,name=numa_cells,json=numaCells" json:"numa_cells,omitempty"`
}
func (m *Topology) Reset() { *m = Topology{} }
func (m *Topology) String() string { return proto.CompactTextString(m) }
func (*Topology) ProtoMessage() {}
func (*Topology) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *Topology) GetNumaCells() []*Cell {
if m != nil {
return m.NumaCells
}
return nil
}
type SMBios struct {
Manufacturer string `protobuf:"bytes,1,opt,name=manufacturer" json:"manufacturer,omitempty"`
Product string `protobuf:"bytes,2,opt,name=product" json:"product,omitempty"`
Version string `protobuf:"bytes,3,opt,name=version" json:"version,omitempty"`
Sku string `protobuf:"bytes,4,opt,name=sku" json:"sku,omitempty"`
Family string `protobuf:"bytes,5,opt,name=family" json:"family,omitempty"`
}
func (m *SMBios) Reset() { *m = SMBios{} }
func (m *SMBios) String() string { return proto.CompactTextString(m) }
func (*SMBios) ProtoMessage() {}
func (*SMBios) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func (m *SMBios) GetManufacturer() string {
if m != nil {
return m.Manufacturer
}
return ""
}
func (m *SMBios) GetProduct() string {
if m != nil {
return m.Product
}
return ""
}
func (m *SMBios) GetVersion() string {
if m != nil {
return m.Version
}
return ""
}
func (m *SMBios) GetSku() string {
if m != nil {
return m.Sku
}
return ""
}
func (m *SMBios) GetFamily() string {
if m != nil {
return m.Family
}
return ""
}
type DiskInfo struct {
Format string `protobuf:"bytes,1,opt,name=format" json:"format,omitempty"`
BackingFile string `protobuf:"bytes,2,opt,name=backingFile" json:"backingFile,omitempty"`
ActualSize uint64 `protobuf:"varint,3,opt,name=actualSize" json:"actualSize,omitempty"`
VirtualSize uint64 `protobuf:"varint,4,opt,name=virtualSize" json:"virtualSize,omitempty"`
}
func (m *DiskInfo) Reset() { *m = DiskInfo{} }
func (m *DiskInfo) String() string { return proto.CompactTextString(m) }
func (*DiskInfo) ProtoMessage() {}
func (*DiskInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
func (m *DiskInfo) GetFormat() string {
if m != nil {
return m.Format
}
return ""
}
func (m *DiskInfo) GetBackingFile() string {
if m != nil {
return m.BackingFile
}
return ""
}
func (m *DiskInfo) GetActualSize() uint64 {
if m != nil {
return m.ActualSize
}
return 0
}
func (m *DiskInfo) GetVirtualSize() uint64 {
if m != nil {
return m.VirtualSize
}
return 0
}
type ClusterConfig struct {
ExpandDisksEnabled bool `protobuf:"varint,1,opt,name=ExpandDisksEnabled" json:"ExpandDisksEnabled,omitempty"`
FreePageReportingDisabled bool `protobuf:"varint,2,opt,name=FreePageReportingDisabled" json:"FreePageReportingDisabled,omitempty"`
BochsDisplayForEFIGuests bool `protobuf:"varint,3,opt,name=BochsDisplayForEFIGuests" json:"BochsDisplayForEFIGuests,omitempty"`
SerialConsoleLogDisabled bool `protobuf:"varint,4,opt,name=SerialConsoleLogDisabled" json:"SerialConsoleLogDisabled,omitempty"`
}
func (m *ClusterConfig) Reset() { *m = ClusterConfig{} }
func (m *ClusterConfig) String() string { return proto.CompactTextString(m) }
func (*ClusterConfig) ProtoMessage() {}
func (*ClusterConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
func (m *ClusterConfig) GetExpandDisksEnabled() bool {
if m != nil {
return m.ExpandDisksEnabled
}
return false
}
func (m *ClusterConfig) GetFreePageReportingDisabled() bool {
if m != nil {
return m.FreePageReportingDisabled
}
return false
}
func (m *ClusterConfig) GetBochsDisplayForEFIGuests() bool {
if m != nil {
return m.BochsDisplayForEFIGuests
}
return false
}
func (m *ClusterConfig) GetSerialConsoleLogDisabled() bool {
if m != nil {
return m.SerialConsoleLogDisabled
}
return false
}
type InterfaceBindingMigration struct {
Method string `protobuf:"bytes,1,opt,name=Method" json:"Method,omitempty"`
}
func (m *InterfaceBindingMigration) Reset() { *m = InterfaceBindingMigration{} }
func (m *InterfaceBindingMigration) String() string { return proto.CompactTextString(m) }
func (*InterfaceBindingMigration) ProtoMessage() {}
func (*InterfaceBindingMigration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
func (m *InterfaceBindingMigration) GetMethod() string {
if m != nil {
return m.Method
}
return ""
}
type VirtualMachineOptions struct {
VirtualMachineSMBios *SMBios `protobuf:"bytes,1,opt,name=VirtualMachineSMBios" json:"VirtualMachineSMBios,omitempty"`
MemBalloonStatsPeriod uint32 `protobuf:"varint,2,opt,name=MemBalloonStatsPeriod" json:"MemBalloonStatsPeriod,omitempty"`
PreallocatedVolumes []string `protobuf:"bytes,3,rep,name=PreallocatedVolumes" json:"PreallocatedVolumes,omitempty"`
Topology *Topology `protobuf:"bytes,4,opt,name=topology" json:"topology,omitempty"`
DisksInfo map[string]*DiskInfo `protobuf:"bytes,5,rep,name=DisksInfo" json:"DisksInfo,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Deprecated, use clusterConfig.ExpandDisksEnabled
ExpandDisksEnabled bool `protobuf:"varint,6,opt,name=ExpandDisksEnabled" json:"ExpandDisksEnabled,omitempty"`
ClusterConfig *ClusterConfig `protobuf:"bytes,7,opt,name=clusterConfig" json:"clusterConfig,omitempty"`
InterfaceDomainAttachment map[string]string `protobuf:"bytes,8,rep,name=interfaceDomainAttachment" json:"interfaceDomainAttachment,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
InterfaceMigration map[string]*InterfaceBindingMigration `protobuf:"bytes,9,rep,name=interfaceMigration" json:"interfaceMigration,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *VirtualMachineOptions) Reset() { *m = VirtualMachineOptions{} }
func (m *VirtualMachineOptions) String() string { return proto.CompactTextString(m) }
func (*VirtualMachineOptions) ProtoMessage() {}
func (*VirtualMachineOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
func (m *VirtualMachineOptions) GetVirtualMachineSMBios() *SMBios {
if m != nil {
return m.VirtualMachineSMBios
}
return nil
}
func (m *VirtualMachineOptions) GetMemBalloonStatsPeriod() uint32 {
if m != nil {
return m.MemBalloonStatsPeriod
}
return 0
}
func (m *VirtualMachineOptions) GetPreallocatedVolumes() []string {
if m != nil {
return m.PreallocatedVolumes
}
return nil
}
func (m *VirtualMachineOptions) GetTopology() *Topology {
if m != nil {
return m.Topology
}
return nil
}
func (m *VirtualMachineOptions) GetDisksInfo() map[string]*DiskInfo {
if m != nil {
return m.DisksInfo
}
return nil
}
func (m *VirtualMachineOptions) GetExpandDisksEnabled() bool {
if m != nil {
return m.ExpandDisksEnabled
}
return false
}
func (m *VirtualMachineOptions) GetClusterConfig() *ClusterConfig {
if m != nil {
return m.ClusterConfig
}
return nil
}
func (m *VirtualMachineOptions) GetInterfaceDomainAttachment() map[string]string {
if m != nil {
return m.InterfaceDomainAttachment
}
return nil
}
func (m *VirtualMachineOptions) GetInterfaceMigration() map[string]*InterfaceBindingMigration {
if m != nil {
return m.InterfaceMigration
}
return nil
}
type VMIRequest struct {
Vmi *VMI `protobuf:"bytes,1,opt,name=vmi" json:"vmi,omitempty"`
Options *VirtualMachineOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
}
func (m *VMIRequest) Reset() { *m = VMIRequest{} }
func (m *VMIRequest) String() string { return proto.CompactTextString(m) }
func (*VMIRequest) ProtoMessage() {}
func (*VMIRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
func (m *VMIRequest) GetVmi() *VMI {
if m != nil {
return m.Vmi
}
return nil
}
func (m *VMIRequest) GetOptions() *VirtualMachineOptions {
if m != nil {
return m.Options
}
return nil
}
type MigrationRequest struct {
Vmi *VMI `protobuf:"bytes,1,opt,name=vmi" json:"vmi,omitempty"`
Options []byte `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"`
}
func (m *MigrationRequest) Reset() { *m = MigrationRequest{} }
func (m *MigrationRequest) String() string { return proto.CompactTextString(m) }
func (*MigrationRequest) ProtoMessage() {}
func (*MigrationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
func (m *MigrationRequest) GetVmi() *VMI {
if m != nil {
return m.Vmi
}
return nil
}
func (m *MigrationRequest) GetOptions() []byte {
if m != nil {
return m.Options
}
return nil
}
type ExecRequest struct {
DomainName string `protobuf:"bytes,1,opt,name=domainName" json:"domainName,omitempty"`
Command string `protobuf:"bytes,2,opt,name=Command" json:"Command,omitempty"`
Args []string `protobuf:"bytes,3,rep,name=Args" json:"Args,omitempty"`
TimeoutSeconds int32 `protobuf:"varint,4,opt,name=timeoutSeconds" json:"timeoutSeconds,omitempty"`
}
func (m *ExecRequest) Reset() { *m = ExecRequest{} }
func (m *ExecRequest) String() string { return proto.CompactTextString(m) }
func (*ExecRequest) ProtoMessage() {}
func (*ExecRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
func (m *ExecRequest) GetDomainName() string {
if m != nil {
return m.DomainName
}
return ""
}
func (m *ExecRequest) GetCommand() string {
if m != nil {
return m.Command
}
return ""
}
func (m *ExecRequest) GetArgs() []string {
if m != nil {
return m.Args
}
return nil
}
func (m *ExecRequest) GetTimeoutSeconds() int32 {
if m != nil {
return m.TimeoutSeconds
}
return 0
}
type EmptyRequest struct {
}
func (m *EmptyRequest) Reset() { *m = EmptyRequest{} }
func (m *EmptyRequest) String() string { return proto.CompactTextString(m) }
func (*EmptyRequest) ProtoMessage() {}
func (*EmptyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
type Response struct {
Success bool `protobuf:"varint,1,opt,name=success" json:"success,omitempty"`
Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"`
}
func (m *Response) Reset() { *m = Response{} }
func (m *Response) String() string { return proto.CompactTextString(m) }
func (*Response) ProtoMessage() {}
func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
func (m *Response) GetSuccess() bool {
if m != nil {
return m.Success
}
return false
}
func (m *Response) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
type DomainResponse struct {
Response *Response `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
Domain string `protobuf:"bytes,2,opt,name=domain" json:"domain,omitempty"`
}
func (m *DomainResponse) Reset() { *m = DomainResponse{} }
func (m *DomainResponse) String() string { return proto.CompactTextString(m) }
func (*DomainResponse) ProtoMessage() {}
func (*DomainResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
func (m *DomainResponse) GetResponse() *Response {
if m != nil {
return m.Response
}
return nil
}
func (m *DomainResponse) GetDomain() string {
if m != nil {
return m.Domain
}
return ""
}
type DomainStatsResponse struct {
Response *Response `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
DomainStats string `protobuf:"bytes,2,opt,name=domainStats" json:"domainStats,omitempty"`
}
func (m *DomainStatsResponse) Reset() { *m = DomainStatsResponse{} }
func (m *DomainStatsResponse) String() string { return proto.CompactTextString(m) }
func (*DomainStatsResponse) ProtoMessage() {}
func (*DomainStatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
func (m *DomainStatsResponse) GetResponse() *Response {
if m != nil {
return m.Response
}
return nil
}
func (m *DomainStatsResponse) GetDomainStats() string {
if m != nil {
return m.DomainStats
}
return ""
}
type GuestInfoResponse struct {
Response *Response `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
GuestInfoResponse string `protobuf:"bytes,2,opt,name=guestInfoResponse" json:"guestInfoResponse,omitempty"`
}
func (m *GuestInfoResponse) Reset() { *m = GuestInfoResponse{} }
func (m *GuestInfoResponse) String() string { return proto.CompactTextString(m) }
func (*GuestInfoResponse) ProtoMessage() {}
func (*GuestInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
func (m *GuestInfoResponse) GetResponse() *Response {
if m != nil {
return m.Response
}
return nil
}
func (m *GuestInfoResponse) GetGuestInfoResponse() string {
if m != nil {
return m.GuestInfoResponse
}
return ""
}
type GuestUserListResponse struct {
Response *Response `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
GuestUserListResponse string `protobuf:"bytes,2,opt,name=guestUserListResponse" json:"guestUserListResponse,omitempty"`
}
func (m *GuestUserListResponse) Reset() { *m = GuestUserListResponse{} }
func (m *GuestUserListResponse) String() string { return proto.CompactTextString(m) }
func (*GuestUserListResponse) ProtoMessage() {}
func (*GuestUserListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
func (m *GuestUserListResponse) GetResponse() *Response {
if m != nil {
return m.Response
}
return nil
}
func (m *GuestUserListResponse) GetGuestUserListResponse() string {
if m != nil {
return m.GuestUserListResponse
}
return ""
}
type GuestFilesystemsResponse struct {
Response *Response `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
GuestFilesystemsResponse string `protobuf:"bytes,2,opt,name=guestFilesystemsResponse" json:"guestFilesystemsResponse,omitempty"`
}
func (m *GuestFilesystemsResponse) Reset() { *m = GuestFilesystemsResponse{} }
func (m *GuestFilesystemsResponse) String() string { return proto.CompactTextString(m) }
func (*GuestFilesystemsResponse) ProtoMessage() {}
func (*GuestFilesystemsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
func (m *GuestFilesystemsResponse) GetResponse() *Response {
if m != nil {
return m.Response
}
return nil
}
func (m *GuestFilesystemsResponse) GetGuestFilesystemsResponse() string {
if m != nil {
return m.GuestFilesystemsResponse
}
return ""
}
type ExecResponse struct {
Response *Response `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
ExitCode int32 `protobuf:"varint,2,opt,name=exitCode" json:"exitCode,omitempty"`
StdOut string `protobuf:"bytes,3,opt,name=stdOut" json:"stdOut,omitempty"`
}
func (m *ExecResponse) Reset() { *m = ExecResponse{} }
func (m *ExecResponse) String() string { return proto.CompactTextString(m) }
func (*ExecResponse) ProtoMessage() {}
func (*ExecResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
func (m *ExecResponse) GetResponse() *Response {
if m != nil {
return m.Response
}
return nil
}
func (m *ExecResponse) GetExitCode() int32 {
if m != nil {
return m.ExitCode
}
return 0
}
func (m *ExecResponse) GetStdOut() string {
if m != nil {
return m.StdOut
}
return ""
}
type GuestPingRequest struct {
DomainName string `protobuf:"bytes,1,opt,name=domainName" json:"domainName,omitempty"`
TimeoutSeconds int32 `protobuf:"varint,2,opt,name=timeoutSeconds" json:"timeoutSeconds,omitempty"`
}
func (m *GuestPingRequest) Reset() { *m = GuestPingRequest{} }
func (m *GuestPingRequest) String() string { return proto.CompactTextString(m) }
func (*GuestPingRequest) ProtoMessage() {}
func (*GuestPingRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
func (m *GuestPingRequest) GetDomainName() string {
if m != nil {
return m.DomainName
}
return ""
}
func (m *GuestPingRequest) GetTimeoutSeconds() int32 {
if m != nil {
return m.TimeoutSeconds
}
return 0
}
type GuestPingResponse struct {
Response *Response `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
}
func (m *GuestPingResponse) Reset() { *m = GuestPingResponse{} }
func (m *GuestPingResponse) String() string { return proto.CompactTextString(m) }
func (*GuestPingResponse) ProtoMessage() {}
func (*GuestPingResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
func (m *GuestPingResponse) GetResponse() *Response {
if m != nil {
return m.Response
}
return nil
}
type FreezeRequest struct {
Vmi *VMI `protobuf:"bytes,1,opt,name=vmi" json:"vmi,omitempty"`
UnfreezeTimeoutSeconds int32 `protobuf:"varint,2,opt,name=unfreezeTimeoutSeconds" json:"unfreezeTimeoutSeconds,omitempty"`
}
func (m *FreezeRequest) Reset() { *m = FreezeRequest{} }
func (m *FreezeRequest) String() string { return proto.CompactTextString(m) }
func (*FreezeRequest) ProtoMessage() {}
func (*FreezeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
func (m *FreezeRequest) GetVmi() *VMI {
if m != nil {
return m.Vmi
}
return nil
}
func (m *FreezeRequest) GetUnfreezeTimeoutSeconds() int32 {
if m != nil {
return m.UnfreezeTimeoutSeconds
}
return 0
}
type MemoryDumpRequest struct {
Vmi *VMI `protobuf:"bytes,1,opt,name=vmi" json:"vmi,omitempty"`
DumpPath string `protobuf:"bytes,2,opt,name=dumpPath" json:"dumpPath,omitempty"`
}
func (m *MemoryDumpRequest) Reset() { *m = MemoryDumpRequest{} }
func (m *MemoryDumpRequest) String() string { return proto.CompactTextString(m) }
func (*MemoryDumpRequest) ProtoMessage() {}
func (*MemoryDumpRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
func (m *MemoryDumpRequest) GetVmi() *VMI {
if m != nil {
return m.Vmi
}
return nil
}
func (m *MemoryDumpRequest) GetDumpPath() string {
if m != nil {
return m.DumpPath
}
return ""
}
type SEVInfoResponse struct {
Response *Response `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
SevInfo []byte `protobuf:"bytes,2,opt,name=sevInfo,proto3" json:"sevInfo,omitempty"`
}
func (m *SEVInfoResponse) Reset() { *m = SEVInfoResponse{} }
func (m *SEVInfoResponse) String() string { return proto.CompactTextString(m) }
func (*SEVInfoResponse) ProtoMessage() {}
func (*SEVInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
func (m *SEVInfoResponse) GetResponse() *Response {
if m != nil {
return m.Response
}
return nil
}
func (m *SEVInfoResponse) GetSevInfo() []byte {
if m != nil {
return m.SevInfo
}
return nil
}
type LaunchMeasurementResponse struct {
Response *Response `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
LaunchMeasurement []byte `protobuf:"bytes,2,opt,name=launchMeasurement,proto3" json:"launchMeasurement,omitempty"`
}
func (m *LaunchMeasurementResponse) Reset() { *m = LaunchMeasurementResponse{} }
func (m *LaunchMeasurementResponse) String() string { return proto.CompactTextString(m) }
func (*LaunchMeasurementResponse) ProtoMessage() {}
func (*LaunchMeasurementResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
func (m *LaunchMeasurementResponse) GetResponse() *Response {
if m != nil {
return m.Response
}
return nil
}
func (m *LaunchMeasurementResponse) GetLaunchMeasurement() []byte {
if m != nil {
return m.LaunchMeasurement
}
return nil
}
type InjectLaunchSecretRequest struct {
Vmi *VMI `protobuf:"bytes,1,opt,name=vmi" json:"vmi,omitempty"`
Options []byte `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"`
}
func (m *InjectLaunchSecretRequest) Reset() { *m = InjectLaunchSecretRequest{} }
func (m *InjectLaunchSecretRequest) String() string { return proto.CompactTextString(m) }
func (*InjectLaunchSecretRequest) ProtoMessage() {}
func (*InjectLaunchSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
func (m *InjectLaunchSecretRequest) GetVmi() *VMI {
if m != nil {
return m.Vmi
}
return nil
}
func (m *InjectLaunchSecretRequest) GetOptions() []byte {
if m != nil {
return m.Options
}
return nil
}
type DirtyRateStatsResponse struct {
Response *Response `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
DirtyRateMbs int64 `protobuf:"varint,2,opt,name=dirtyRateMbs" json:"dirtyRateMbs,omitempty"`
}
func (m *DirtyRateStatsResponse) Reset() { *m = DirtyRateStatsResponse{} }
func (m *DirtyRateStatsResponse) String() string { return proto.CompactTextString(m) }
func (*DirtyRateStatsResponse) ProtoMessage() {}
func (*DirtyRateStatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
func (m *DirtyRateStatsResponse) GetResponse() *Response {
if m != nil {
return m.Response
}
return nil
}
func (m *DirtyRateStatsResponse) GetDirtyRateMbs() int64 {
if m != nil {
return m.DirtyRateMbs
}
return 0
}
type ScreenshotResponse struct {
Response *Response `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
Mime string `protobuf:"bytes,2,opt,name=mime" json:"mime,omitempty"`
Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
}
func (m *ScreenshotResponse) Reset() { *m = ScreenshotResponse{} }
func (m *ScreenshotResponse) String() string { return proto.CompactTextString(m) }
func (*ScreenshotResponse) ProtoMessage() {}
func (*ScreenshotResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
func (m *ScreenshotResponse) GetResponse() *Response {
if m != nil {
return m.Response
}
return nil
}
func (m *ScreenshotResponse) GetMime() string {
if m != nil {
return m.Mime
}
return ""
}
func (m *ScreenshotResponse) GetData() []byte {
if m != nil {
return m.Data
}
return nil
}
type BackupRequest struct {
Vmi *VMI `protobuf:"bytes,1,opt,name=vmi" json:"vmi,omitempty"`
Options []byte `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"`
}
func (m *BackupRequest) Reset() { *m = BackupRequest{} }
func (m *BackupRequest) String() string { return proto.CompactTextString(m) }
func (*BackupRequest) ProtoMessage() {}
func (*BackupRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} }
func (m *BackupRequest) GetVmi() *VMI {
if m != nil {
return m.Vmi
}
return nil
}
func (m *BackupRequest) GetOptions() []byte {
if m != nil {
return m.Options
}
return nil
}
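// init below registers every message type under its fully-qualified
// kubevirt.cmd.v1 name with the proto type registry, which lets the proto
// runtime resolve these messages by name (for example when unpacking Any
// values or rendering text output).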
func init() {
proto.RegisterType((*QemuVersionResponse)(nil), "kubevirt.cmd.v1.QemuVersionResponse")
proto.RegisterType((*VMI)(nil), "kubevirt.cmd.v1.VMI")
proto.RegisterType((*CPU)(nil), "kubevirt.cmd.v1.CPU")
proto.RegisterType((*Sibling)(nil), "kubevirt.cmd.v1.Sibling")
proto.RegisterType((*Pages)(nil), "kubevirt.cmd.v1.Pages")
proto.RegisterType((*Memory)(nil), "kubevirt.cmd.v1.Memory")
proto.RegisterType((*Cell)(nil), "kubevirt.cmd.v1.Cell")
proto.RegisterType((*Topology)(nil), "kubevirt.cmd.v1.Topology")
proto.RegisterType((*SMBios)(nil), "kubevirt.cmd.v1.SMBios")
proto.RegisterType((*DiskInfo)(nil), "kubevirt.cmd.v1.DiskInfo")
proto.RegisterType((*ClusterConfig)(nil), "kubevirt.cmd.v1.ClusterConfig")
proto.RegisterType((*InterfaceBindingMigration)(nil), "kubevirt.cmd.v1.InterfaceBindingMigration")
proto.RegisterType((*VirtualMachineOptions)(nil), "kubevirt.cmd.v1.VirtualMachineOptions")
proto.RegisterType((*VMIRequest)(nil), "kubevirt.cmd.v1.VMIRequest")
proto.RegisterType((*MigrationRequest)(nil), "kubevirt.cmd.v1.MigrationRequest")
proto.RegisterType((*ExecRequest)(nil), "kubevirt.cmd.v1.ExecRequest")
proto.RegisterType((*EmptyRequest)(nil), "kubevirt.cmd.v1.EmptyRequest")
proto.RegisterType((*Response)(nil), "kubevirt.cmd.v1.Response")
proto.RegisterType((*DomainResponse)(nil), "kubevirt.cmd.v1.DomainResponse")
proto.RegisterType((*DomainStatsResponse)(nil), "kubevirt.cmd.v1.DomainStatsResponse")
proto.RegisterType((*GuestInfoResponse)(nil), "kubevirt.cmd.v1.GuestInfoResponse")
proto.RegisterType((*GuestUserListResponse)(nil), "kubevirt.cmd.v1.GuestUserListResponse")
proto.RegisterType((*GuestFilesystemsResponse)(nil), "kubevirt.cmd.v1.GuestFilesystemsResponse")
proto.RegisterType((*ExecResponse)(nil), "kubevirt.cmd.v1.ExecResponse")
proto.RegisterType((*GuestPingRequest)(nil), "kubevirt.cmd.v1.GuestPingRequest")
proto.RegisterType((*GuestPingResponse)(nil), "kubevirt.cmd.v1.GuestPingResponse")
proto.RegisterType((*FreezeRequest)(nil), "kubevirt.cmd.v1.FreezeRequest")
proto.RegisterType((*MemoryDumpRequest)(nil), "kubevirt.cmd.v1.MemoryDumpRequest")
proto.RegisterType((*SEVInfoResponse)(nil), "kubevirt.cmd.v1.SEVInfoResponse")
proto.RegisterType((*LaunchMeasurementResponse)(nil), "kubevirt.cmd.v1.LaunchMeasurementResponse")
proto.RegisterType((*InjectLaunchSecretRequest)(nil), "kubevirt.cmd.v1.InjectLaunchSecretRequest")
proto.RegisterType((*DirtyRateStatsResponse)(nil), "kubevirt.cmd.v1.DirtyRateStatsResponse")
proto.RegisterType((*ScreenshotResponse)(nil), "kubevirt.cmd.v1.ScreenshotResponse")
proto.RegisterType((*BackupRequest)(nil), "kubevirt.cmd.v1.BackupRequest")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Cmd service
type CmdClient interface {
SyncVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error)
PauseVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error)
UnpauseVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error)
FreezeVirtualMachine(ctx context.Context, in *FreezeRequest, opts ...grpc.CallOption) (*Response, error)
UnfreezeVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error)
ResetVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error)
SoftRebootVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error)
ShutdownVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error)
KillVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error)
DeleteVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error)
MigrateVirtualMachine(ctx context.Context, in *MigrationRequest, opts ...grpc.CallOption) (*Response, error)
SyncMigrationTarget(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error)
CancelVirtualMachineMigration(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error)
SignalTargetPodCleanup(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error)
FinalizeVirtualMachineMigration(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error)
HotplugHostDevices(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error)
GetDomain(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*DomainResponse, error)
GetDomainStats(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*DomainStatsResponse, error)
GetGuestInfo(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*GuestInfoResponse, error)
GetUsers(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*GuestUserListResponse, error)
GetFilesystems(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*GuestFilesystemsResponse, error)
Ping(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*Response, error)
Exec(ctx context.Context, in *ExecRequest, opts ...grpc.CallOption) (*ExecResponse, error)
GuestPing(ctx context.Context, in *GuestPingRequest, opts ...grpc.CallOption) (*GuestPingResponse, error)
VirtualMachineMemoryDump(ctx context.Context, in *MemoryDumpRequest, opts ...grpc.CallOption) (*Response, error)
GetQemuVersion(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*QemuVersionResponse, error)
SyncVirtualMachineCPUs(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error)
SyncVirtualMachineMemory(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error)
GetSEVInfo(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*SEVInfoResponse, error)
GetLaunchMeasurement(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*LaunchMeasurementResponse, error)
InjectLaunchSecret(ctx context.Context, in *InjectLaunchSecretRequest, opts ...grpc.CallOption) (*Response, error)
GetDomainDirtyRateStats(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*DirtyRateStatsResponse, error)
GetScreenshot(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*ScreenshotResponse, error)
BackupVirtualMachine(ctx context.Context, in *BackupRequest, opts ...grpc.CallOption) (*Response, error)
}
type cmdClient struct {
cc *grpc.ClientConn
}
func NewCmdClient(cc *grpc.ClientConn) CmdClient {
return &cmdClient{cc}
}
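// The function below is an illustrative sketch, not part of the generated
// API: it shows how a hypothetical caller might dial the cmd endpoint and
// issue a liveness Ping through the generated client. The target address
// and dial options are assumptions for demonstration only.
func examplePingViaCmdClient(target string) error {
	// Dial the launcher endpoint; WithInsecure matches the plaintext
	// transport this generated code's grpc version supports.
	conn, err := grpc.Dial(target, grpc.WithInsecure())
	if err != nil {
		return err
	}
	defer conn.Close()
	// Ping takes an EmptyRequest and only reports whether the cmd server
	// is reachable and responding.
	_, err = NewCmdClient(conn).Ping(context.Background(), &EmptyRequest{})
	return err
}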
func (c *cmdClient) SyncVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/SyncVirtualMachine", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) PauseVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/PauseVirtualMachine", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) UnpauseVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/UnpauseVirtualMachine", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) FreezeVirtualMachine(ctx context.Context, in *FreezeRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/FreezeVirtualMachine", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) UnfreezeVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/UnfreezeVirtualMachine", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) ResetVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/ResetVirtualMachine", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) SoftRebootVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/SoftRebootVirtualMachine", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) ShutdownVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/ShutdownVirtualMachine", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) KillVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/KillVirtualMachine", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) DeleteVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/DeleteVirtualMachine", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) MigrateVirtualMachine(ctx context.Context, in *MigrationRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/MigrateVirtualMachine", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) SyncMigrationTarget(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/SyncMigrationTarget", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) CancelVirtualMachineMigration(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/CancelVirtualMachineMigration", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) SignalTargetPodCleanup(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/SignalTargetPodCleanup", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) FinalizeVirtualMachineMigration(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/FinalizeVirtualMachineMigration", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) HotplugHostDevices(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/HotplugHostDevices", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) GetDomain(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*DomainResponse, error) {
out := new(DomainResponse)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/GetDomain", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) GetDomainStats(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*DomainStatsResponse, error) {
out := new(DomainStatsResponse)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/GetDomainStats", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) GetGuestInfo(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*GuestInfoResponse, error) {
out := new(GuestInfoResponse)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/GetGuestInfo", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) GetUsers(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*GuestUserListResponse, error) {
out := new(GuestUserListResponse)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/GetUsers", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) GetFilesystems(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*GuestFilesystemsResponse, error) {
out := new(GuestFilesystemsResponse)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/GetFilesystems", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) Ping(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/Ping", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) Exec(ctx context.Context, in *ExecRequest, opts ...grpc.CallOption) (*ExecResponse, error) {
out := new(ExecResponse)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/Exec", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) GuestPing(ctx context.Context, in *GuestPingRequest, opts ...grpc.CallOption) (*GuestPingResponse, error) {
out := new(GuestPingResponse)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/GuestPing", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) VirtualMachineMemoryDump(ctx context.Context, in *MemoryDumpRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/VirtualMachineMemoryDump", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) GetQemuVersion(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*QemuVersionResponse, error) {
out := new(QemuVersionResponse)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/GetQemuVersion", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) SyncVirtualMachineCPUs(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/SyncVirtualMachineCPUs", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) SyncVirtualMachineMemory(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/SyncVirtualMachineMemory", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) GetSEVInfo(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*SEVInfoResponse, error) {
out := new(SEVInfoResponse)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/GetSEVInfo", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) GetLaunchMeasurement(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*LaunchMeasurementResponse, error) {
out := new(LaunchMeasurementResponse)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/GetLaunchMeasurement", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) InjectLaunchSecret(ctx context.Context, in *InjectLaunchSecretRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/InjectLaunchSecret", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) GetDomainDirtyRateStats(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*DirtyRateStatsResponse, error) {
out := new(DirtyRateStatsResponse)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/GetDomainDirtyRateStats", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) GetScreenshot(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*ScreenshotResponse, error) {
out := new(ScreenshotResponse)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/GetScreenshot", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *cmdClient) BackupVirtualMachine(ctx context.Context, in *BackupRequest, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/kubevirt.cmd.v1.Cmd/BackupVirtualMachine", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Cmd service
type CmdServer interface {
SyncVirtualMachine(context.Context, *VMIRequest) (*Response, error)
PauseVirtualMachine(context.Context, *VMIRequest) (*Response, error)
UnpauseVirtualMachine(context.Context, *VMIRequest) (*Response, error)
FreezeVirtualMachine(context.Context, *FreezeRequest) (*Response, error)
UnfreezeVirtualMachine(context.Context, *VMIRequest) (*Response, error)
ResetVirtualMachine(context.Context, *VMIRequest) (*Response, error)
SoftRebootVirtualMachine(context.Context, *VMIRequest) (*Response, error)
ShutdownVirtualMachine(context.Context, *VMIRequest) (*Response, error)
KillVirtualMachine(context.Context, *VMIRequest) (*Response, error)
DeleteVirtualMachine(context.Context, *VMIRequest) (*Response, error)
MigrateVirtualMachine(context.Context, *MigrationRequest) (*Response, error)
SyncMigrationTarget(context.Context, *VMIRequest) (*Response, error)
CancelVirtualMachineMigration(context.Context, *VMIRequest) (*Response, error)
SignalTargetPodCleanup(context.Context, *VMIRequest) (*Response, error)
FinalizeVirtualMachineMigration(context.Context, *VMIRequest) (*Response, error)
HotplugHostDevices(context.Context, *VMIRequest) (*Response, error)
GetDomain(context.Context, *EmptyRequest) (*DomainResponse, error)
GetDomainStats(context.Context, *EmptyRequest) (*DomainStatsResponse, error)
GetGuestInfo(context.Context, *EmptyRequest) (*GuestInfoResponse, error)
GetUsers(context.Context, *EmptyRequest) (*GuestUserListResponse, error)
GetFilesystems(context.Context, *EmptyRequest) (*GuestFilesystemsResponse, error)
Ping(context.Context, *EmptyRequest) (*Response, error)
Exec(context.Context, *ExecRequest) (*ExecResponse, error)
GuestPing(context.Context, *GuestPingRequest) (*GuestPingResponse, error)
VirtualMachineMemoryDump(context.Context, *MemoryDumpRequest) (*Response, error)
GetQemuVersion(context.Context, *EmptyRequest) (*QemuVersionResponse, error)
SyncVirtualMachineCPUs(context.Context, *VMIRequest) (*Response, error)
SyncVirtualMachineMemory(context.Context, *VMIRequest) (*Response, error)
GetSEVInfo(context.Context, *EmptyRequest) (*SEVInfoResponse, error)
GetLaunchMeasurement(context.Context, *VMIRequest) (*LaunchMeasurementResponse, error)
InjectLaunchSecret(context.Context, *InjectLaunchSecretRequest) (*Response, error)
GetDomainDirtyRateStats(context.Context, *EmptyRequest) (*DirtyRateStatsResponse, error)
GetScreenshot(context.Context, *VMIRequest) (*ScreenshotResponse, error)
BackupVirtualMachine(context.Context, *BackupRequest) (*Response, error)
}
func RegisterCmdServer(s *grpc.Server, srv CmdServer) {
s.RegisterService(&_Cmd_serviceDesc, srv)
}
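// Illustrative sketch (not generated code): a hypothetical server setup
// would implement CmdServer, then register it on a grpc.Server before
// serving on a listener. Identifiers such as socketPath and myCmdServer
// are assumptions for demonstration only:
//
//	lis, err := net.Listen("unix", socketPath)
//	if err != nil {
//		return err
//	}
//	grpcServer := grpc.NewServer()
//	RegisterCmdServer(grpcServer, &myCmdServer{})
//	return grpcServer.Serve(lis)
//
// The per-method handlers that follow decode the incoming request, then
// either call the CmdServer implementation directly or route the call
// through the configured grpc.UnaryServerInterceptor.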
func _Cmd_SyncVirtualMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VMIRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).SyncVirtualMachine(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/SyncVirtualMachine",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).SyncVirtualMachine(ctx, req.(*VMIRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_PauseVirtualMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VMIRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).PauseVirtualMachine(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/PauseVirtualMachine",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).PauseVirtualMachine(ctx, req.(*VMIRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_UnpauseVirtualMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VMIRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).UnpauseVirtualMachine(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/UnpauseVirtualMachine",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).UnpauseVirtualMachine(ctx, req.(*VMIRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_FreezeVirtualMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(FreezeRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).FreezeVirtualMachine(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/FreezeVirtualMachine",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).FreezeVirtualMachine(ctx, req.(*FreezeRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_UnfreezeVirtualMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VMIRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).UnfreezeVirtualMachine(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/UnfreezeVirtualMachine",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).UnfreezeVirtualMachine(ctx, req.(*VMIRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_ResetVirtualMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VMIRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).ResetVirtualMachine(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/ResetVirtualMachine",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).ResetVirtualMachine(ctx, req.(*VMIRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_SoftRebootVirtualMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VMIRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).SoftRebootVirtualMachine(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/SoftRebootVirtualMachine",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).SoftRebootVirtualMachine(ctx, req.(*VMIRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_ShutdownVirtualMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VMIRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).ShutdownVirtualMachine(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/ShutdownVirtualMachine",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).ShutdownVirtualMachine(ctx, req.(*VMIRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_KillVirtualMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VMIRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).KillVirtualMachine(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/KillVirtualMachine",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).KillVirtualMachine(ctx, req.(*VMIRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_DeleteVirtualMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VMIRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).DeleteVirtualMachine(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/DeleteVirtualMachine",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).DeleteVirtualMachine(ctx, req.(*VMIRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_MigrateVirtualMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(MigrationRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).MigrateVirtualMachine(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/MigrateVirtualMachine",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).MigrateVirtualMachine(ctx, req.(*MigrationRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_SyncMigrationTarget_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VMIRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).SyncMigrationTarget(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/SyncMigrationTarget",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).SyncMigrationTarget(ctx, req.(*VMIRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_CancelVirtualMachineMigration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VMIRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).CancelVirtualMachineMigration(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/CancelVirtualMachineMigration",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).CancelVirtualMachineMigration(ctx, req.(*VMIRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_SignalTargetPodCleanup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VMIRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).SignalTargetPodCleanup(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/SignalTargetPodCleanup",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).SignalTargetPodCleanup(ctx, req.(*VMIRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_FinalizeVirtualMachineMigration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VMIRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).FinalizeVirtualMachineMigration(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/FinalizeVirtualMachineMigration",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).FinalizeVirtualMachineMigration(ctx, req.(*VMIRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_HotplugHostDevices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VMIRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).HotplugHostDevices(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/HotplugHostDevices",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).HotplugHostDevices(ctx, req.(*VMIRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_GetDomain_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(EmptyRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).GetDomain(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/GetDomain",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).GetDomain(ctx, req.(*EmptyRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_GetDomainStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(EmptyRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).GetDomainStats(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/GetDomainStats",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).GetDomainStats(ctx, req.(*EmptyRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_GetGuestInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(EmptyRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).GetGuestInfo(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/GetGuestInfo",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).GetGuestInfo(ctx, req.(*EmptyRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_GetUsers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(EmptyRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).GetUsers(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/GetUsers",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).GetUsers(ctx, req.(*EmptyRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_GetFilesystems_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(EmptyRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).GetFilesystems(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/GetFilesystems",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).GetFilesystems(ctx, req.(*EmptyRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(EmptyRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).Ping(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/Ping",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).Ping(ctx, req.(*EmptyRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_Exec_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ExecRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).Exec(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/Exec",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).Exec(ctx, req.(*ExecRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_GuestPing_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GuestPingRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).GuestPing(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/GuestPing",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).GuestPing(ctx, req.(*GuestPingRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_VirtualMachineMemoryDump_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(MemoryDumpRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).VirtualMachineMemoryDump(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/VirtualMachineMemoryDump",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).VirtualMachineMemoryDump(ctx, req.(*MemoryDumpRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_GetQemuVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(EmptyRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).GetQemuVersion(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/GetQemuVersion",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).GetQemuVersion(ctx, req.(*EmptyRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_SyncVirtualMachineCPUs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VMIRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).SyncVirtualMachineCPUs(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/SyncVirtualMachineCPUs",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).SyncVirtualMachineCPUs(ctx, req.(*VMIRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_SyncVirtualMachineMemory_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VMIRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).SyncVirtualMachineMemory(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/SyncVirtualMachineMemory",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).SyncVirtualMachineMemory(ctx, req.(*VMIRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_GetSEVInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(EmptyRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).GetSEVInfo(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/GetSEVInfo",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).GetSEVInfo(ctx, req.(*EmptyRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_GetLaunchMeasurement_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VMIRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).GetLaunchMeasurement(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/GetLaunchMeasurement",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).GetLaunchMeasurement(ctx, req.(*VMIRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_InjectLaunchSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(InjectLaunchSecretRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).InjectLaunchSecret(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/InjectLaunchSecret",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).InjectLaunchSecret(ctx, req.(*InjectLaunchSecretRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_GetDomainDirtyRateStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(EmptyRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).GetDomainDirtyRateStats(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/GetDomainDirtyRateStats",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).GetDomainDirtyRateStats(ctx, req.(*EmptyRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_GetScreenshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(VMIRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).GetScreenshot(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/GetScreenshot",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).GetScreenshot(ctx, req.(*VMIRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Cmd_BackupVirtualMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(BackupRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CmdServer).BackupVirtualMachine(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.cmd.v1.Cmd/BackupVirtualMachine",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CmdServer).BackupVirtualMachine(ctx, req.(*BackupRequest))
}
return interceptor(ctx, in, info, handler)
}
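// _Cmd_serviceDesc below maps each method name of the kubevirt.cmd.v1.Cmd
// service to its handler; grpc.Server consults this table to dispatch
// incoming unary RPCs. The Cmd service defines no streaming methods, so
// Streams is empty.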
var _Cmd_serviceDesc = grpc.ServiceDesc{
ServiceName: "kubevirt.cmd.v1.Cmd",
HandlerType: (*CmdServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "SyncVirtualMachine",
Handler: _Cmd_SyncVirtualMachine_Handler,
},
{
MethodName: "PauseVirtualMachine",
Handler: _Cmd_PauseVirtualMachine_Handler,
},
{
MethodName: "UnpauseVirtualMachine",
Handler: _Cmd_UnpauseVirtualMachine_Handler,
},
{
MethodName: "FreezeVirtualMachine",
Handler: _Cmd_FreezeVirtualMachine_Handler,
},
{
MethodName: "UnfreezeVirtualMachine",
Handler: _Cmd_UnfreezeVirtualMachine_Handler,
},
{
MethodName: "ResetVirtualMachine",
Handler: _Cmd_ResetVirtualMachine_Handler,
},
{
MethodName: "SoftRebootVirtualMachine",
Handler: _Cmd_SoftRebootVirtualMachine_Handler,
},
{
MethodName: "ShutdownVirtualMachine",
Handler: _Cmd_ShutdownVirtualMachine_Handler,
},
{
MethodName: "KillVirtualMachine",
Handler: _Cmd_KillVirtualMachine_Handler,
},
{
MethodName: "DeleteVirtualMachine",
Handler: _Cmd_DeleteVirtualMachine_Handler,
},
{
MethodName: "MigrateVirtualMachine",
Handler: _Cmd_MigrateVirtualMachine_Handler,
},
{
MethodName: "SyncMigrationTarget",
Handler: _Cmd_SyncMigrationTarget_Handler,
},
{
MethodName: "CancelVirtualMachineMigration",
Handler: _Cmd_CancelVirtualMachineMigration_Handler,
},
{
MethodName: "SignalTargetPodCleanup",
Handler: _Cmd_SignalTargetPodCleanup_Handler,
},
{
MethodName: "FinalizeVirtualMachineMigration",
Handler: _Cmd_FinalizeVirtualMachineMigration_Handler,
},
{
MethodName: "HotplugHostDevices",
Handler: _Cmd_HotplugHostDevices_Handler,
},
{
MethodName: "GetDomain",
Handler: _Cmd_GetDomain_Handler,
},
{
MethodName: "GetDomainStats",
Handler: _Cmd_GetDomainStats_Handler,
},
{
MethodName: "GetGuestInfo",
Handler: _Cmd_GetGuestInfo_Handler,
},
{
MethodName: "GetUsers",
Handler: _Cmd_GetUsers_Handler,
},
{
MethodName: "GetFilesystems",
Handler: _Cmd_GetFilesystems_Handler,
},
{
MethodName: "Ping",
Handler: _Cmd_Ping_Handler,
},
{
MethodName: "Exec",
Handler: _Cmd_Exec_Handler,
},
{
MethodName: "GuestPing",
Handler: _Cmd_GuestPing_Handler,
},
{
MethodName: "VirtualMachineMemoryDump",
Handler: _Cmd_VirtualMachineMemoryDump_Handler,
},
{
MethodName: "GetQemuVersion",
Handler: _Cmd_GetQemuVersion_Handler,
},
{
MethodName: "SyncVirtualMachineCPUs",
Handler: _Cmd_SyncVirtualMachineCPUs_Handler,
},
{
MethodName: "SyncVirtualMachineMemory",
Handler: _Cmd_SyncVirtualMachineMemory_Handler,
},
{
MethodName: "GetSEVInfo",
Handler: _Cmd_GetSEVInfo_Handler,
},
{
MethodName: "GetLaunchMeasurement",
Handler: _Cmd_GetLaunchMeasurement_Handler,
},
{
MethodName: "InjectLaunchSecret",
Handler: _Cmd_InjectLaunchSecret_Handler,
},
{
MethodName: "GetDomainDirtyRateStats",
Handler: _Cmd_GetDomainDirtyRateStats_Handler,
},
{
MethodName: "GetScreenshot",
Handler: _Cmd_GetScreenshot_Handler,
},
{
MethodName: "BackupVirtualMachine",
Handler: _Cmd_BackupVirtualMachine_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "pkg/handler-launcher-com/cmd/v1/cmd.proto",
}
func init() { proto.RegisterFile("pkg/handler-launcher-com/cmd/v1/cmd.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 1920 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x59, 0x5f, 0x73, 0xdb, 0xc6,
0x11, 0x17, 0x45, 0x4a, 0x22, 0x57, 0x7f, 0x62, 0x9f, 0x25, 0x19, 0x52, 0x6b, 0x5b, 0xbd, 0x76,
0x5c, 0xa5, 0x93, 0x48, 0xb5, 0xe3, 0x64, 0x3a, 0x9e, 0x4e, 0xc6, 0x11, 0x45, 0x29, 0x4a, 0x4c,
0x9b, 0x06, 0x25, 0x79, 0x9a, 0x36, 0x93, 0x39, 0x01, 0x27, 0xf2, 0x2a, 0xe0, 0x8e, 0xc1, 0x1d,
0x58, 0x33, 0x4f, 0x9d, 0x71, 0xa7, 0x0f, 0x9d, 0xe9, 0xe7, 0x6b, 0x9f, 0xfa, 0x2d, 0xfa, 0x9e,
0xb9, 0x03, 0x40, 0x81, 0x04, 0x20, 0x5a, 0x43, 0x3e, 0x11, 0x77, 0xb7, 0xfb, 0xdb, 0xbd, 0xbd,
0xdd, 0xbd, 0x1f, 0x40, 0xf8, 0xb8, 0x77, 0xd5, 0xd9, 0xef, 0x12, 0xee, 0x7a, 0x34, 0xf8, 0xd4,
0x23, 0x21, 0x77, 0xba, 0x34, 0xf8, 0xd4, 0x11, 0xfe, 0xbe, 0xe3, 0xbb, 0xfb, 0xfd, 0x27, 0xfa,
0x67, 0xaf, 0x17, 0x08, 0x25, 0xd0, 0x47, 0x57, 0xe1, 0x05, 0xed, 0xb3, 0x40, 0xed, 0xe9, 0xb9,
0xfe, 0x13, 0x7c, 0x09, 0xf7, 0xde, 0x50, 0x3f, 0x3c, 0xa7, 0x81, 0x64, 0x82, 0xdb, 0x54, 0xf6,
0x04, 0x97, 0x14, 0x7d, 0x0e, 0xd5, 0x20, 0x7e, 0xb6, 0x4a, 0x3b, 0xa5, 0xdd, 0xe5, 0xa7, 0x5b,
0x7b, 0x63, 0xaa, 0x7b, 0x89, 0xb0, 0x3d, 0x14, 0x45, 0x16, 0x2c, 0xf5, 0x23, 0x24, 0x6b, 0x7e,
0xa7, 0xb4, 0x5b, 0xb3, 0x93, 0x21, 0x7e, 0x04, 0xe5, 0xf3, 0xe6, 0x89, 0x11, 0xf0, 0xd9, 0x37,
0x52, 0x70, 0x03, 0xbb, 0x62, 0x27, 0x43, 0xfc, 0x04, 0xca, 0xf5, 0xd6, 0x19, 0x5a, 0x83, 0x79,
0xe6, 0x9a, 0xb5, 0x55, 0x7b, 0x9e, 0xb9, 0x68, 0x1b, 0xaa, 0x92, 0x5d, 0x78, 0x8c, 0x77, 0xa4,
0x35, 0xbf, 0x53, 0xde, 0x5d, 0xb5, 0x87, 0x63, 0xbc, 0x0f, 0x4b, 0xed, 0xe8, 0x39, 0xa3, 0xb6,
0x0e, 0x0b, 0x7d, 0xe2, 0x85, 0xd4, 0xb8, 0x51, 0xb1, 0xa3, 0x01, 0x6e, 0xc0, 0x42, 0x8b, 0x74,
0xa8, 0xd4, 0xcb, 0x8e, 0x08, 0xb9, 0x32, 0x1a, 0x15, 0x3b, 0x1a, 0x20, 0x04, 0x95, 0x90, 0x33,
0x15, 0xbb, 0x6e, 0x9e, 0xf5, 0x9c, 0x64, 0x3f, 0x51, 0xab, 0x6c, 0xa0, 0xcd, 0x33, 0x7e, 0x06,
0x8b, 0x4d, 0xea, 0x8b, 0x60, 0x80, 0x36, 0x61, 0x91, 0xf8, 0x29, 0xa0, 0x78, 0x94, 0x87, 0x84,
0xff, 0x53, 0x82, 0x4a, 0x9d, 0x7a, 0x5e, 0xc6, 0xd7, 0x7d, 0x58, 0xf4, 0x0d, 0x9c, 0x11, 0x5f,
0x7e, 0x7a, 0x3f, 0x13, 0xe9, 0xc8, 0x9a, 0x1d, 0x8b, 0xa1, 0x4f, 0x60, 0xa1, 0xa7, 0xb7, 0x61,
0x95, 0x77, 0xca, 0xbb, 0xcb, 0x4f, 0x37, 0x33, 0xf2, 0x66, 0x93, 0x76, 0x24, 0x84, 0xbe, 0x80,
0x9a, 0xcb, 0xa4, 0x22, 0xdc, 0xa1, 0xd2, 0xaa, 0x18, 0x0d, 0x2b, 0xa3, 0x11, 0xc7, 0xd1, 0xbe,
0x16, 0x45, 0xbb, 0x50, 0x71, 0x7a, 0xa1, 0xb4, 0x16, 0x8c, 0xca, 0x7a, 0x46, 0xa5, 0xde, 0x3a,
0xb3, 0x8d, 0x04, 0x7e, 0x01, 0xd5, 0x53, 0xd1, 0x13, 0x9e, 0xe8, 0x0c, 0xd0, 0x33, 0x00, 0x1e,
0xfa, 0xe4, 0x07, 0x87, 0x7a, 0x9e, 0xb4, 0x4a, 0x46, 0x77, 0x23, 0xab, 0x4b, 0x3d, 0xcf, 0xae,
0x69, 0x41, 0xfd, 0x24, 0xf1, 0xbf, 0x4a, 0xb0, 0xd8, 0x6e, 0x1e, 0x30, 0x21, 0x11, 0x86, 0x15,
0x9f, 0xf0, 0xf0, 0x92, 0x38, 0x2a, 0x0c, 0x68, 0x60, 0xe2, 0x54, 0xb3, 0x47, 0xe6, 0x74, 0x16,
0xf5, 0x02, 0xe1, 0x86, 0x4e, 0x12, 0xe1, 0x64, 0x98, 0x4e, 0xc0, 0xf2, 0x48, 0x02, 0xa2, 0x3b,
0x50, 0x96, 0x57, 0xa1, 0x55, 0x31, 0xb3, 0xfa, 0x51, 0x1f, 0xde, 0x25, 0xf1, 0x99, 0x37, 0xb0,
0x16, 0xcc, 0x64, 0x3c, 0xc2, 0xff, 0x2c, 0x41, 0xf5, 0x90, 0xc9, 0xab, 0x13, 0x7e, 0x29, 0x8c,
0x90, 0x08, 0x7c, 0xa2, 0x62, 0x47, 0xe2, 0x11, 0xda, 0x81, 0xe5, 0x0b, 0xe2, 0x5c, 0x31, 0xde,
0x39, 0x62, 0x1e, 0x8d, 0xdd, 0x48, 0x4f, 0xa1, 0x87, 0x00, 0xda, 0x5f, 0xe2, 0xb5, 0x93, 0xfc,
0xa9, 0xd8, 0xa9, 0x19, 0x8d, 0xa0, 0x43, 0x92, 0x08, 0x54, 0x8c, 0x40, 0x7a, 0x0a, 0xff, 0xbf,
0x04, 0xab, 0x75, 0x2f, 0x94, 0x8a, 0x06, 0x75, 0xc1, 0x2f, 0x59, 0x07, 0xed, 0x01, 0x6a, 0xbc,
0xeb, 0x11, 0xee, 0x6a, 0xff, 0x64, 0x83, 0x93, 0x0b, 0x8f, 0x46, 0xa9, 0x54, 0xb5, 0x73, 0x56,
0xd0, 0x1f, 0x61, 0xeb, 0x28, 0xa0, 0x54, 0xe7, 0x83, 0x4d, 0x7b, 0x22, 0x50, 0x8c, 0x77, 0x0e,
0x99, 0x8c, 0xd4, 0xe6, 0x8d, 0x5a, 0xb1, 0x00, 0x7a, 0x0e, 0xd6, 0x81, 0x70, 0xba, 0xf2, 0x90,
0xc9, 0x9e, 0x47, 0x06, 0x47, 0x22, 0x68, 0x1c, 0x9d, 0x1c, 0x87, 0x54, 0x2a, 0x69, 0xf6, 0x53,
0xb5, 0x0b, 0xd7, 0xb5, 0x6e, 0x9b, 0x06, 0x8c, 0x78, 0x75, 0xc1, 0xa5, 0xf0, 0xe8, 0x4b, 0x71,
0x6d, 0xb8, 0x12, 0xe9, 0x16, 0xad, 0xe3, 0xcf, 0x60, 0xeb, 0x84, 0x2b, 0x1a, 0x5c, 0x12, 0x87,
0x1e, 0x30, 0xee, 0x32, 0xde, 0x69, 0xb2, 0x4e, 0x40, 0x94, 0x3e, 0xc7, 0x4d, 0x5d, 0x7c, 0xaa,
0x2b, 0xdc, 0xe4, 0x40, 0xa2, 0x11, 0xfe, 0xdf, 0x12, 0x6c, 0x9c, 0x47, 0xc1, 0x6b, 0x12, 0xa7,
0xcb, 0x38, 0x7d, 0xdd, 0xd3, 0x0a, 0x12, 0x7d, 0x0b, 0xeb, 0xa3, 0x0b, 0x51, 0xa6, 0xc5, 0x7d,
0x2d, 0x5b, 0x6d, 0xd1, 0xb2, 0x9d, 0xab, 0x84, 0x9e, 0xc1, 0x46, 0x93, 0xfa, 0x07, 0xc4, 0xf3,
0x84, 0xe0, 0x6d, 0x45, 0x94, 0x6c, 0xd1, 0x80, 0x89, 0x28, 0x9a, 0xab, 0x76, 0xfe, 0x22, 0xfa,
0x3d, 0xdc, 0x6b, 0x05, 0x54, 0xcf, 0x3b, 0x44, 0x51, 0xf7, 0x5c, 0x78, 0xa1, 0x1f, 0xd7, 0x6f,
0xcd, 0xce, 0x5b, 0xd2, 0x0d, 0x58, 0xc5, 0x35, 0x65, 0xe2, 0x95, 0xd7, 0x80, 0x93, 0xa2, 0xb3,
0x87, 0xa2, 0xa8, 0x0d, 0x35, 0x93, 0x00, 0x3a, 0x77, 0xe3, 0xca, 0xfd, 0x3c, 0xa3, 0x97, 0x1b,
0xa6, 0xbd, 0xa1, 0x5e, 0x83, 0xab, 0x60, 0x60, 0x5f, 0xe3, 0x14, 0x64, 0xdd, 0x62, 0x61, 0xd6,
0x1d, 0xc2, 0xaa, 0x93, 0x4e, 0x5b, 0x6b, 0xc9, 0x6c, 0xe0, 0x61, 0xb6, 0x0d, 0xa4, 0xa5, 0xec,
0x51, 0x25, 0xf4, 0xbe, 0x04, 0x5b, 0x2c, 0x49, 0x83, 0x43, 0xe1, 0x13, 0xc6, 0xbf, 0x52, 0x8a,
0x38, 0x5d, 0x9f, 0x72, 0x65, 0x55, 0xcd, 0xde, 0x1a, 0x1f, 0xb8, 0xb7, 0x93, 0x22, 0x9c, 0x68,
0xaf, 0xc5, 0x76, 0x10, 0x07, 0x34, 0x5c, 0x1c, 0x26, 0xa1, 0x55, 0x33, 0xd6, 0xbf, 0xbc, 0xad,
0xf5, 0x21, 0x40, 0x64, 0x36, 0x07, 0x79, 0xfb, 0x2d, 0xac, 0x8d, 0x1e, 0x84, 0x6e, 0x5c, 0x57,
0x74, 0x10, 0x67, 0xbb, 0x7e, 0x44, 0xfb, 0xe9, 0xcb, 0x2d, 0x2f, 0x31, 0x92, 0xee, 0x15, 0xdf,
0x7b, 0xcf, 0xe7, 0xff, 0x50, 0xda, 0x7e, 0x09, 0x0f, 0x6f, 0x8e, 0x42, 0x8e, 0xa1, 0x91, 0x5b,
0xb4, 0x96, 0x46, 0xfb, 0x11, 0xee, 0x17, 0xec, 0x2a, 0x07, 0xe6, 0xc5, 0xa8, 0xbf, 0xbf, 0xcb,
0xf8, 0x5b, 0x58, 0xed, 0x29, 0x93, 0xb8, 0x0f, 0x70, 0xde, 0x3c, 0xb1, 0xe9, 0x8f, 0xba, 0xc1,
0xa0, 0xc7, 0x50, 0xee, 0xfb, 0x2c, 0xae, 0xe1, 0xec, 0xe5, 0xa4, 0x25, 0xb5, 0x00, 0x7a, 0x01,
0x4b, 0x22, 0x3a, 0x86, 0xd8, 0xfa, 0xe3, 0x0f, 0x3b, 0x34, 0x3b, 0x51, 0xc3, 0xa7, 0x70, 0xe7,
0xda, 0x9f, 0x5b, 0x5a, 0xb7, 0x46, 0xad, 0xaf, 0x5c, 0xa3, 0xbe, 0x2f, 0xc1, 0x72, 0xe3, 0x1d,
0x75, 0x12, 0xc4, 0x87, 0x00, 0xae, 0x39, 0x95, 0x57, 0xc4, 0xa7, 0x71, 0xf0, 0x52, 0x33, 0x1a,
0xa9, 0x2e, 0x7c, 0x9f, 0x70, 0x37, 0xb9, 0xf2, 0xe2, 0xa1, 0xe6, 0x1a, 0x5f, 0x05, 0x9d, 0xa4,
0x99, 0x98, 0x67, 0xf4, 0x18, 0xd6, 0x14, 0xf3, 0xa9, 0x08, 0x55, 0x9b, 0x3a, 0x82, 0xbb, 0xd2,
0xf4, 0x90, 0x05, 0x7b, 0x6c, 0x16, 0xaf, 0xc1, 0x4a, 0xc3, 0xef, 0xa9, 0x41, 0xec, 0x05, 0xfe,
0x12, 0xaa, 0x76, 0x8a, 0xcb, 0xc9, 0xd0, 0x71, 0xa8, 0x94, 0xf1, 0x05, 0x93, 0x0c, 0xf5, 0x8a,
0x4f, 0xa5, 0x24, 0x9d, 0x24, 0x31, 0x92, 0x21, 0xfe, 0x01, 0xd6, 0xa2, 0xdc, 0x9a, 0x96, 0x48,
0x6e, 0xc2, 0x62, 0xb4, 0xf9, 0xd8, 0x42, 0x3c, 0xc2, 0x1c, 0xee, 0x45, 0x06, 0x4c, 0x77, 0x9d,
0xd6, 0xca, 0x0e, 0x2c, 0xbb, 0xd7, 0x68, 0xc9, 0x25, 0x9e, 0x9a, 0xc2, 0xef, 0xe0, 0xae, 0xb9,
0xd0, 0x4c, 0x35, 0x4d, 0x69, 0xed, 0x13, 0xb8, 0xdb, 0x19, 0xc7, 0x8a, 0x6d, 0x66, 0x17, 0xf0,
0x3f, 0x4a, 0xb0, 0x61, 0x4c, 0x9f, 0x49, 0x1a, 0xbc, 0x64, 0x52, 0x4d, 0x6b, 0xfe, 0x19, 0x6c,
0x74, 0xf2, 0xf0, 0x62, 0x17, 0xf2, 0x17, 0xf1, 0xbf, 0x4b, 0x60, 0x19, 0x37, 0x34, 0xa7, 0x91,
0x03, 0xa9, 0xa8, 0x3f, 0x75, 0xd8, 0x9f, 0x83, 0xd5, 0x29, 0x80, 0x8c, 0x9d, 0x29, 0x5c, 0xc7,
0x03, 0x58, 0x89, 0xca, 0x66, 0x3a, 0x17, 0xb6, 0xa1, 0x4a, 0xdf, 0x31, 0x55, 0x17, 0x6e, 0x64,
0x72, 0xc1, 0x1e, 0x8e, 0x75, 0xee, 0x49, 0xe5, 0xbe, 0x0e, 0x55, 0x4c, 0x21, 0xe3, 0x11, 0xfe,
0x0e, 0xee, 0x98, 0x48, 0xb4, 0x34, 0x51, 0xfe, 0xc0, 0xb2, 0xcd, 0x16, 0xe2, 0x7c, 0x6e, 0x21,
0x7e, 0x13, 0xe7, 0x59, 0x84, 0x3d, 0xd5, 0xde, 0xb0, 0x80, 0x55, 0xcd, 0xe9, 0x7e, 0xa2, 0xb7,
0xed, 0x56, 0x5f, 0xc0, 0x66, 0xc8, 0x2f, 0x8d, 0xea, 0x69, 0x9e, 0xd3, 0x05, 0xab, 0xf8, 0x2d,
0xdc, 0x8d, 0xde, 0x50, 0x0e, 0x43, 0xbf, 0x77, 0x5b, 0xa3, 0xdb, 0x50, 0x75, 0x43, 0xbf, 0xd7,
0x22, 0xaa, 0x1b, 0x1f, 0xfe, 0x70, 0x8c, 0x2f, 0xe0, 0xa3, 0x76, 0xe3, 0x7c, 0x16, 0xb5, 0xa7,
0x9b, 0x19, 0xed, 0x1b, 0x56, 0x14, 0x37, 0xe2, 0x78, 0x88, 0xff, 0x5e, 0x82, 0xad, 0x97, 0xe6,
0x9d, 0xb9, 0x49, 0x89, 0x0c, 0x03, 0xaa, 0x2f, 0xc4, 0x19, 0x94, 0xba, 0x37, 0x8e, 0x19, 0x1b,
0xce, 0x2e, 0xe0, 0xef, 0x35, 0xdf, 0xfd, 0x2b, 0x75, 0x54, 0xe4, 0x47, 0x9b, 0x3a, 0x01, 0x55,
0xb3, 0xbb, 0x6a, 0x24, 0x6c, 0x1e, 0xb2, 0x40, 0x0d, 0x6c, 0xa2, 0xe8, 0x4c, 0xda, 0x26, 0x86,
0x15, 0x37, 0x01, 0x6c, 0x5e, 0x44, 0xf6, 0xca, 0xf6, 0xc8, 0x1c, 0x96, 0x80, 0xda, 0x4e, 0x40,
0x29, 0x97, 0x5d, 0x31, 0x75, 0x38, 0x11, 0x54, 0x7c, 0xe6, 0x27, 0xcd, 0xc1, 0x3c, 0xeb, 0x39,
0x97, 0x28, 0x62, 0x6a, 0x74, 0xc5, 0x36, 0xcf, 0xf8, 0x0d, 0xac, 0x1e, 0x10, 0xe7, 0x2a, 0xec,
0xcd, 0x2c, 0x78, 0x4f, 0xff, 0xbb, 0x09, 0xe5, 0xba, 0xef, 0xa2, 0x57, 0x80, 0xda, 0x03, 0xee,
0x8c, 0x72, 0x05, 0xf4, 0x8b, 0x5c, 0xc8, 0xc8, 0xf8, 0x76, 0xf1, 0xd6, 0xf0, 0x1c, 0x7a, 0x0d,
0xf7, 0x5a, 0x24, 0x94, 0x74, 0x66, 0x80, 0x6f, 0x60, 0xe3, 0x8c, 0xf7, 0x66, 0x0a, 0xd9, 0x86,
0xf5, 0xa8, 0x91, 0x8c, 0x21, 0x66, 0x89, 0xfc, 0x48, 0xbf, 0xb9, 0x19, 0xd4, 0x86, 0xcd, 0xb3,
0xb8, 0x8d, 0xcc, 0x32, 0x98, 0x36, 0x95, 0x54, 0xcd, 0x0c, 0xf0, 0x14, 0xac, 0xb6, 0xb8, 0x54,
0x36, 0xbd, 0x10, 0x62, 0x76, 0xa8, 0x36, 0x6c, 0xb6, 0xbb, 0xa1, 0x72, 0xc5, 0xdf, 0xf8, 0xcc,
0x30, 0x5f, 0x01, 0xfa, 0x96, 0x79, 0xde, 0xcc, 0xf0, 0x5a, 0xb0, 0x7e, 0x48, 0x3d, 0xaa, 0x66,
0x77, 0x38, 0x6f, 0x61, 0x23, 0xe2, 0xcf, 0xe3, 0x90, 0xbf, 0xca, 0x7e, 0xe7, 0x1a, 0xe3, 0xd9,
0x13, 0x4f, 0x5d, 0x97, 0xe4, 0x50, 0xe9, 0x94, 0x04, 0x1d, 0xaa, 0xa6, 0xf0, 0xf4, 0x4f, 0xf0,
0xa0, 0x4e, 0xb8, 0x43, 0xc7, 0xa2, 0x79, 0xfd, 0xed, 0x61, 0xba, 0xa3, 0x67, 0x1d, 0x4e, 0xbc,
0xc8, 0xc9, 0x96, 0x70, 0xeb, 0x1e, 0x25, 0x3c, 0xec, 0x4d, 0x81, 0xf9, 0x67, 0x78, 0x74, 0xc4,
0x38, 0xf1, 0xd8, 0x78, 0x25, 0xcd, 0xc2, 0xe1, 0x57, 0x80, 0xbe, 0x16, 0xaa, 0xe7, 0x85, 0x9d,
0xaf, 0x85, 0x54, 0x87, 0xb4, 0xcf, 0x1c, 0x2a, 0xa7, 0xc0, 0x6b, 0x42, 0xed, 0x98, 0xaa, 0x88,
0xbb, 0xa3, 0x07, 0x19, 0xc9, 0xf4, 0x5b, 0xc8, 0xf6, 0xa3, 0xec, 0x0b, 0xed, 0xc8, 0x4b, 0x85,
0x49, 0xaa, 0xb5, 0x21, 0x9c, 0xb9, 0xd3, 0x26, 0x61, 0xfe, 0xa6, 0x00, 0x73, 0xe4, 0x42, 0x34,
0x3d, 0x6f, 0xe5, 0x98, 0xaa, 0x21, 0xe7, 0x9f, 0x04, 0x8b, 0x33, 0xcb, 0x99, 0xd7, 0x05, 0x03,
0x5a, 0x3d, 0xa6, 0x86, 0x5b, 0x4f, 0xf4, 0xf3, 0x71, 0x3e, 0x60, 0x86, 0x97, 0xcf, 0xa1, 0xbf,
0x98, 0x10, 0xa4, 0x38, 0xf2, 0x24, 0xe8, 0x8f, 0xf3, 0xa1, 0xf3, 0x58, 0xf6, 0x1c, 0x3a, 0x80,
0x8a, 0xe6, 0xa2, 0x93, 0x30, 0x6f, 0x3c, 0xf3, 0x06, 0x54, 0x34, 0x57, 0x47, 0xbf, 0xcc, 0x62,
0x5c, 0xbf, 0xf9, 0x6e, 0x3f, 0x28, 0x58, 0x4d, 0x35, 0xe3, 0xda, 0x90, 0x1b, 0xe7, 0x34, 0x8d,
0x71, 0x4e, 0x5e, 0x74, 0x26, 0x69, 0x6a, 0x6d, 0xaa, 0xc7, 0x1a, 0xab, 0x9a, 0x21, 0x85, 0x45,
0xb8, 0xe0, 0x0b, 0x7c, 0x8a, 0xdf, 0x4e, 0xea, 0x79, 0xfa, 0x6c, 0x52, 0x7f, 0xac, 0xdc, 0x3e,
0x3d, 0x73, 0xfe, 0x95, 0x89, 0xfb, 0x48, 0x86, 0x86, 0xd4, 0x5b, 0x67, 0x72, 0xca, 0xcb, 0x2e,
0x83, 0x19, 0xff, 0xc1, 0x31, 0xcd, 0x9d, 0x0c, 0xc7, 0x54, 0xc5, 0xf4, 0x7d, 0xd2, 0xf6, 0x77,
0xb2, 0x1f, 0x61, 0x47, 0x79, 0x3f, 0x9e, 0x43, 0x04, 0xd6, 0x8f, 0xa9, 0xca, 0x50, 0xf5, 0x9b,
0x5d, 0xcc, 0x7e, 0x6b, 0x2a, 0xe4, 0xfa, 0x78, 0x0e, 0x7d, 0x0f, 0x28, 0x4b, 0xc4, 0x51, 0xde,
0xf7, 0xaa, 0x02, 0xb6, 0x7e, 0x73, 0x48, 0x1c, 0xb8, 0x3f, 0x6c, 0x5a, 0xa3, 0x8c, 0x7c, 0x52,
0x7c, 0x7e, 0x9b, 0xf3, 0x89, 0x2f, 0x8f, 0xd1, 0x9b, 0x5e, 0xb3, 0xaa, 0xe3, 0x3e, 0xe4, 0xde,
0x37, 0xc7, 0xe7, 0xd7, 0xd9, 0xc0, 0x67, 0x58, 0x7b, 0xc4, 0x04, 0x23, 0x62, 0x3d, 0x91, 0x09,
0x8e, 0xf0, 0xef, 0x1b, 0xc3, 0x71, 0x50, 0xf9, 0x6e, 0xbe, 0xff, 0xe4, 0x62, 0xd1, 0xfc, 0x31,
0xf9, 0xd9, 0xcf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x83, 0xa6, 0x93, 0x1a, 0xc5, 0x1c, 0x00, 0x00,
}
// Code generated by MockGen. DO NOT EDIT.
// Source: pkg/handler-launcher-com/cmd/v1/cmd.pb.go
//
// Generated by this command:
//
// mockgen -source pkg/handler-launcher-com/cmd/v1/cmd.pb.go -package=v1 -destination=pkg/handler-launcher-com/cmd/v1/generated_mock_cmd.go
//
// Package v1 is a generated GoMock package.
package v1
import (
reflect "reflect"
gomock "go.uber.org/mock/gomock"
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// MockCmdClient is a mock of CmdClient interface.
type MockCmdClient struct {
ctrl *gomock.Controller
recorder *MockCmdClientMockRecorder
isgomock struct{}
}
// MockCmdClientMockRecorder is the mock recorder for MockCmdClient.
type MockCmdClientMockRecorder struct {
mock *MockCmdClient
}
// NewMockCmdClient creates a new mock instance.
func NewMockCmdClient(ctrl *gomock.Controller) *MockCmdClient {
mock := &MockCmdClient{ctrl: ctrl}
mock.recorder = &MockCmdClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockCmdClient) EXPECT() *MockCmdClientMockRecorder {
return m.recorder
}
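// Illustrative only (not part of the generated file): a typical test wires
// expectations through EXPECT() before exercising code that talks to the cmd
// client. The test name below is hypothetical; the gomock pattern itself is
// standard.
//
//	func TestPing(t *testing.T) {
//		ctrl := gomock.NewController(t)
//		client := NewMockCmdClient(ctrl)
//		client.EXPECT().
//			Ping(gomock.Any(), gomock.Any()).
//			Return(&Response{}, nil)
//		if _, err := client.Ping(context.Background(), &EmptyRequest{}); err != nil {
//			t.Fatalf("Ping returned an unexpected error: %v", err)
//		}
//	}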
// BackupVirtualMachine mocks base method.
func (m *MockCmdClient) BackupVirtualMachine(ctx context.Context, in *BackupRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "BackupVirtualMachine", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// BackupVirtualMachine indicates an expected call of BackupVirtualMachine.
func (mr *MockCmdClientMockRecorder) BackupVirtualMachine(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BackupVirtualMachine", reflect.TypeOf((*MockCmdClient)(nil).BackupVirtualMachine), varargs...)
}
// CancelVirtualMachineMigration mocks base method.
func (m *MockCmdClient) CancelVirtualMachineMigration(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "CancelVirtualMachineMigration", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CancelVirtualMachineMigration indicates an expected call of CancelVirtualMachineMigration.
func (mr *MockCmdClientMockRecorder) CancelVirtualMachineMigration(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelVirtualMachineMigration", reflect.TypeOf((*MockCmdClient)(nil).CancelVirtualMachineMigration), varargs...)
}
// DeleteVirtualMachine mocks base method.
func (m *MockCmdClient) DeleteVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "DeleteVirtualMachine", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DeleteVirtualMachine indicates an expected call of DeleteVirtualMachine.
func (mr *MockCmdClientMockRecorder) DeleteVirtualMachine(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVirtualMachine", reflect.TypeOf((*MockCmdClient)(nil).DeleteVirtualMachine), varargs...)
}
// Exec mocks base method.
func (m *MockCmdClient) Exec(ctx context.Context, in *ExecRequest, opts ...grpc.CallOption) (*ExecResponse, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Exec", varargs...)
ret0, _ := ret[0].(*ExecResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Exec indicates an expected call of Exec.
func (mr *MockCmdClientMockRecorder) Exec(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exec", reflect.TypeOf((*MockCmdClient)(nil).Exec), varargs...)
}
// FinalizeVirtualMachineMigration mocks base method.
func (m *MockCmdClient) FinalizeVirtualMachineMigration(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "FinalizeVirtualMachineMigration", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FinalizeVirtualMachineMigration indicates an expected call of FinalizeVirtualMachineMigration.
func (mr *MockCmdClientMockRecorder) FinalizeVirtualMachineMigration(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinalizeVirtualMachineMigration", reflect.TypeOf((*MockCmdClient)(nil).FinalizeVirtualMachineMigration), varargs...)
}
// FreezeVirtualMachine mocks base method.
func (m *MockCmdClient) FreezeVirtualMachine(ctx context.Context, in *FreezeRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "FreezeVirtualMachine", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FreezeVirtualMachine indicates an expected call of FreezeVirtualMachine.
func (mr *MockCmdClientMockRecorder) FreezeVirtualMachine(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FreezeVirtualMachine", reflect.TypeOf((*MockCmdClient)(nil).FreezeVirtualMachine), varargs...)
}
// GetDomain mocks base method.
func (m *MockCmdClient) GetDomain(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*DomainResponse, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetDomain", varargs...)
ret0, _ := ret[0].(*DomainResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetDomain indicates an expected call of GetDomain.
func (mr *MockCmdClientMockRecorder) GetDomain(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDomain", reflect.TypeOf((*MockCmdClient)(nil).GetDomain), varargs...)
}
// GetDomainDirtyRateStats mocks base method.
func (m *MockCmdClient) GetDomainDirtyRateStats(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*DirtyRateStatsResponse, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetDomainDirtyRateStats", varargs...)
ret0, _ := ret[0].(*DirtyRateStatsResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetDomainDirtyRateStats indicates an expected call of GetDomainDirtyRateStats.
func (mr *MockCmdClientMockRecorder) GetDomainDirtyRateStats(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDomainDirtyRateStats", reflect.TypeOf((*MockCmdClient)(nil).GetDomainDirtyRateStats), varargs...)
}
// GetDomainStats mocks base method.
func (m *MockCmdClient) GetDomainStats(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*DomainStatsResponse, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetDomainStats", varargs...)
ret0, _ := ret[0].(*DomainStatsResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetDomainStats indicates an expected call of GetDomainStats.
func (mr *MockCmdClientMockRecorder) GetDomainStats(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDomainStats", reflect.TypeOf((*MockCmdClient)(nil).GetDomainStats), varargs...)
}
// GetFilesystems mocks base method.
func (m *MockCmdClient) GetFilesystems(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*GuestFilesystemsResponse, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetFilesystems", varargs...)
ret0, _ := ret[0].(*GuestFilesystemsResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetFilesystems indicates an expected call of GetFilesystems.
func (mr *MockCmdClientMockRecorder) GetFilesystems(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFilesystems", reflect.TypeOf((*MockCmdClient)(nil).GetFilesystems), varargs...)
}
// GetGuestInfo mocks base method.
func (m *MockCmdClient) GetGuestInfo(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*GuestInfoResponse, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetGuestInfo", varargs...)
ret0, _ := ret[0].(*GuestInfoResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetGuestInfo indicates an expected call of GetGuestInfo.
func (mr *MockCmdClientMockRecorder) GetGuestInfo(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGuestInfo", reflect.TypeOf((*MockCmdClient)(nil).GetGuestInfo), varargs...)
}
// GetLaunchMeasurement mocks base method.
func (m *MockCmdClient) GetLaunchMeasurement(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*LaunchMeasurementResponse, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetLaunchMeasurement", varargs...)
ret0, _ := ret[0].(*LaunchMeasurementResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetLaunchMeasurement indicates an expected call of GetLaunchMeasurement.
func (mr *MockCmdClientMockRecorder) GetLaunchMeasurement(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLaunchMeasurement", reflect.TypeOf((*MockCmdClient)(nil).GetLaunchMeasurement), varargs...)
}
// GetQemuVersion mocks base method.
func (m *MockCmdClient) GetQemuVersion(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*QemuVersionResponse, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetQemuVersion", varargs...)
ret0, _ := ret[0].(*QemuVersionResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetQemuVersion indicates an expected call of GetQemuVersion.
func (mr *MockCmdClientMockRecorder) GetQemuVersion(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetQemuVersion", reflect.TypeOf((*MockCmdClient)(nil).GetQemuVersion), varargs...)
}
// GetSEVInfo mocks base method.
func (m *MockCmdClient) GetSEVInfo(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*SEVInfoResponse, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetSEVInfo", varargs...)
ret0, _ := ret[0].(*SEVInfoResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetSEVInfo indicates an expected call of GetSEVInfo.
func (mr *MockCmdClientMockRecorder) GetSEVInfo(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSEVInfo", reflect.TypeOf((*MockCmdClient)(nil).GetSEVInfo), varargs...)
}
// GetScreenshot mocks base method.
func (m *MockCmdClient) GetScreenshot(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*ScreenshotResponse, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetScreenshot", varargs...)
ret0, _ := ret[0].(*ScreenshotResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetScreenshot indicates an expected call of GetScreenshot.
func (mr *MockCmdClientMockRecorder) GetScreenshot(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetScreenshot", reflect.TypeOf((*MockCmdClient)(nil).GetScreenshot), varargs...)
}
// GetUsers mocks base method.
func (m *MockCmdClient) GetUsers(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*GuestUserListResponse, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetUsers", varargs...)
ret0, _ := ret[0].(*GuestUserListResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetUsers indicates an expected call of GetUsers.
func (mr *MockCmdClientMockRecorder) GetUsers(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUsers", reflect.TypeOf((*MockCmdClient)(nil).GetUsers), varargs...)
}
// GuestPing mocks base method.
func (m *MockCmdClient) GuestPing(ctx context.Context, in *GuestPingRequest, opts ...grpc.CallOption) (*GuestPingResponse, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GuestPing", varargs...)
ret0, _ := ret[0].(*GuestPingResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GuestPing indicates an expected call of GuestPing.
func (mr *MockCmdClientMockRecorder) GuestPing(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GuestPing", reflect.TypeOf((*MockCmdClient)(nil).GuestPing), varargs...)
}
// HotplugHostDevices mocks base method.
func (m *MockCmdClient) HotplugHostDevices(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "HotplugHostDevices", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// HotplugHostDevices indicates an expected call of HotplugHostDevices.
func (mr *MockCmdClientMockRecorder) HotplugHostDevices(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HotplugHostDevices", reflect.TypeOf((*MockCmdClient)(nil).HotplugHostDevices), varargs...)
}
// InjectLaunchSecret mocks base method.
func (m *MockCmdClient) InjectLaunchSecret(ctx context.Context, in *InjectLaunchSecretRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "InjectLaunchSecret", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// InjectLaunchSecret indicates an expected call of InjectLaunchSecret.
func (mr *MockCmdClientMockRecorder) InjectLaunchSecret(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InjectLaunchSecret", reflect.TypeOf((*MockCmdClient)(nil).InjectLaunchSecret), varargs...)
}
// KillVirtualMachine mocks base method.
func (m *MockCmdClient) KillVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "KillVirtualMachine", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// KillVirtualMachine indicates an expected call of KillVirtualMachine.
func (mr *MockCmdClientMockRecorder) KillVirtualMachine(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KillVirtualMachine", reflect.TypeOf((*MockCmdClient)(nil).KillVirtualMachine), varargs...)
}
// MigrateVirtualMachine mocks base method.
func (m *MockCmdClient) MigrateVirtualMachine(ctx context.Context, in *MigrationRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "MigrateVirtualMachine", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// MigrateVirtualMachine indicates an expected call of MigrateVirtualMachine.
func (mr *MockCmdClientMockRecorder) MigrateVirtualMachine(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MigrateVirtualMachine", reflect.TypeOf((*MockCmdClient)(nil).MigrateVirtualMachine), varargs...)
}
// PauseVirtualMachine mocks base method.
func (m *MockCmdClient) PauseVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "PauseVirtualMachine", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// PauseVirtualMachine indicates an expected call of PauseVirtualMachine.
func (mr *MockCmdClientMockRecorder) PauseVirtualMachine(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PauseVirtualMachine", reflect.TypeOf((*MockCmdClient)(nil).PauseVirtualMachine), varargs...)
}
// Ping mocks base method.
func (m *MockCmdClient) Ping(ctx context.Context, in *EmptyRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Ping", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Ping indicates an expected call of Ping.
func (mr *MockCmdClientMockRecorder) Ping(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ping", reflect.TypeOf((*MockCmdClient)(nil).Ping), varargs...)
}
// ResetVirtualMachine mocks base method.
func (m *MockCmdClient) ResetVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ResetVirtualMachine", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ResetVirtualMachine indicates an expected call of ResetVirtualMachine.
func (mr *MockCmdClientMockRecorder) ResetVirtualMachine(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetVirtualMachine", reflect.TypeOf((*MockCmdClient)(nil).ResetVirtualMachine), varargs...)
}
// ShutdownVirtualMachine mocks base method.
func (m *MockCmdClient) ShutdownVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ShutdownVirtualMachine", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ShutdownVirtualMachine indicates an expected call of ShutdownVirtualMachine.
func (mr *MockCmdClientMockRecorder) ShutdownVirtualMachine(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShutdownVirtualMachine", reflect.TypeOf((*MockCmdClient)(nil).ShutdownVirtualMachine), varargs...)
}
// SignalTargetPodCleanup mocks base method.
func (m *MockCmdClient) SignalTargetPodCleanup(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "SignalTargetPodCleanup", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SignalTargetPodCleanup indicates an expected call of SignalTargetPodCleanup.
func (mr *MockCmdClientMockRecorder) SignalTargetPodCleanup(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SignalTargetPodCleanup", reflect.TypeOf((*MockCmdClient)(nil).SignalTargetPodCleanup), varargs...)
}
// SoftRebootVirtualMachine mocks base method.
func (m *MockCmdClient) SoftRebootVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "SoftRebootVirtualMachine", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SoftRebootVirtualMachine indicates an expected call of SoftRebootVirtualMachine.
func (mr *MockCmdClientMockRecorder) SoftRebootVirtualMachine(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SoftRebootVirtualMachine", reflect.TypeOf((*MockCmdClient)(nil).SoftRebootVirtualMachine), varargs...)
}
// SyncMigrationTarget mocks base method.
func (m *MockCmdClient) SyncMigrationTarget(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "SyncMigrationTarget", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SyncMigrationTarget indicates an expected call of SyncMigrationTarget.
func (mr *MockCmdClientMockRecorder) SyncMigrationTarget(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncMigrationTarget", reflect.TypeOf((*MockCmdClient)(nil).SyncMigrationTarget), varargs...)
}
// SyncVirtualMachine mocks base method.
func (m *MockCmdClient) SyncVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "SyncVirtualMachine", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SyncVirtualMachine indicates an expected call of SyncVirtualMachine.
func (mr *MockCmdClientMockRecorder) SyncVirtualMachine(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncVirtualMachine", reflect.TypeOf((*MockCmdClient)(nil).SyncVirtualMachine), varargs...)
}
// SyncVirtualMachineCPUs mocks base method.
func (m *MockCmdClient) SyncVirtualMachineCPUs(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "SyncVirtualMachineCPUs", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SyncVirtualMachineCPUs indicates an expected call of SyncVirtualMachineCPUs.
func (mr *MockCmdClientMockRecorder) SyncVirtualMachineCPUs(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncVirtualMachineCPUs", reflect.TypeOf((*MockCmdClient)(nil).SyncVirtualMachineCPUs), varargs...)
}
// SyncVirtualMachineMemory mocks base method.
func (m *MockCmdClient) SyncVirtualMachineMemory(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "SyncVirtualMachineMemory", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SyncVirtualMachineMemory indicates an expected call of SyncVirtualMachineMemory.
func (mr *MockCmdClientMockRecorder) SyncVirtualMachineMemory(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncVirtualMachineMemory", reflect.TypeOf((*MockCmdClient)(nil).SyncVirtualMachineMemory), varargs...)
}
// UnfreezeVirtualMachine mocks base method.
func (m *MockCmdClient) UnfreezeVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "UnfreezeVirtualMachine", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UnfreezeVirtualMachine indicates an expected call of UnfreezeVirtualMachine.
func (mr *MockCmdClientMockRecorder) UnfreezeVirtualMachine(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnfreezeVirtualMachine", reflect.TypeOf((*MockCmdClient)(nil).UnfreezeVirtualMachine), varargs...)
}
// UnpauseVirtualMachine mocks base method.
func (m *MockCmdClient) UnpauseVirtualMachine(ctx context.Context, in *VMIRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "UnpauseVirtualMachine", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UnpauseVirtualMachine indicates an expected call of UnpauseVirtualMachine.
func (mr *MockCmdClientMockRecorder) UnpauseVirtualMachine(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnpauseVirtualMachine", reflect.TypeOf((*MockCmdClient)(nil).UnpauseVirtualMachine), varargs...)
}
// VirtualMachineMemoryDump mocks base method.
func (m *MockCmdClient) VirtualMachineMemoryDump(ctx context.Context, in *MemoryDumpRequest, opts ...grpc.CallOption) (*Response, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "VirtualMachineMemoryDump", varargs...)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// VirtualMachineMemoryDump indicates an expected call of VirtualMachineMemoryDump.
func (mr *MockCmdClientMockRecorder) VirtualMachineMemoryDump(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VirtualMachineMemoryDump", reflect.TypeOf((*MockCmdClient)(nil).VirtualMachineMemoryDump), varargs...)
}
// MockCmdServer is a mock of CmdServer interface.
type MockCmdServer struct {
ctrl *gomock.Controller
recorder *MockCmdServerMockRecorder
isgomock struct{}
}
// MockCmdServerMockRecorder is the mock recorder for MockCmdServer.
type MockCmdServerMockRecorder struct {
mock *MockCmdServer
}
// NewMockCmdServer creates a new mock instance.
func NewMockCmdServer(ctrl *gomock.Controller) *MockCmdServer {
mock := &MockCmdServer{ctrl: ctrl}
mock.recorder = &MockCmdServerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockCmdServer) EXPECT() *MockCmdServerMockRecorder {
return m.recorder
}
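// Illustrative only: the server mock follows the same pattern as the client
// mock, but its methods take plain (ctx, request) arguments with no gRPC call
// options. A test that expects exactly one shutdown request might record:
//
//	server := NewMockCmdServer(ctrl)
//	server.EXPECT().
//		ShutdownVirtualMachine(gomock.Any(), gomock.Any()).
//		Return(&Response{}, nil).
//		Times(1)
//
// Any Response fields a test needs to set are defined in cmd.pb.go; the bare
// &Response{} here is just a placeholder.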
// BackupVirtualMachine mocks base method.
func (m *MockCmdServer) BackupVirtualMachine(arg0 context.Context, arg1 *BackupRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BackupVirtualMachine", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// BackupVirtualMachine indicates an expected call of BackupVirtualMachine.
func (mr *MockCmdServerMockRecorder) BackupVirtualMachine(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BackupVirtualMachine", reflect.TypeOf((*MockCmdServer)(nil).BackupVirtualMachine), arg0, arg1)
}
// CancelVirtualMachineMigration mocks base method.
func (m *MockCmdServer) CancelVirtualMachineMigration(arg0 context.Context, arg1 *VMIRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CancelVirtualMachineMigration", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CancelVirtualMachineMigration indicates an expected call of CancelVirtualMachineMigration.
func (mr *MockCmdServerMockRecorder) CancelVirtualMachineMigration(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelVirtualMachineMigration", reflect.TypeOf((*MockCmdServer)(nil).CancelVirtualMachineMigration), arg0, arg1)
}
// DeleteVirtualMachine mocks base method.
func (m *MockCmdServer) DeleteVirtualMachine(arg0 context.Context, arg1 *VMIRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteVirtualMachine", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DeleteVirtualMachine indicates an expected call of DeleteVirtualMachine.
func (mr *MockCmdServerMockRecorder) DeleteVirtualMachine(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVirtualMachine", reflect.TypeOf((*MockCmdServer)(nil).DeleteVirtualMachine), arg0, arg1)
}
// Exec mocks base method.
func (m *MockCmdServer) Exec(arg0 context.Context, arg1 *ExecRequest) (*ExecResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Exec", arg0, arg1)
ret0, _ := ret[0].(*ExecResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Exec indicates an expected call of Exec.
func (mr *MockCmdServerMockRecorder) Exec(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exec", reflect.TypeOf((*MockCmdServer)(nil).Exec), arg0, arg1)
}
// FinalizeVirtualMachineMigration mocks base method.
func (m *MockCmdServer) FinalizeVirtualMachineMigration(arg0 context.Context, arg1 *VMIRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FinalizeVirtualMachineMigration", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FinalizeVirtualMachineMigration indicates an expected call of FinalizeVirtualMachineMigration.
func (mr *MockCmdServerMockRecorder) FinalizeVirtualMachineMigration(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinalizeVirtualMachineMigration", reflect.TypeOf((*MockCmdServer)(nil).FinalizeVirtualMachineMigration), arg0, arg1)
}
// FreezeVirtualMachine mocks base method.
func (m *MockCmdServer) FreezeVirtualMachine(arg0 context.Context, arg1 *FreezeRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FreezeVirtualMachine", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FreezeVirtualMachine indicates an expected call of FreezeVirtualMachine.
func (mr *MockCmdServerMockRecorder) FreezeVirtualMachine(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FreezeVirtualMachine", reflect.TypeOf((*MockCmdServer)(nil).FreezeVirtualMachine), arg0, arg1)
}
// GetDomain mocks base method.
func (m *MockCmdServer) GetDomain(arg0 context.Context, arg1 *EmptyRequest) (*DomainResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetDomain", arg0, arg1)
ret0, _ := ret[0].(*DomainResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetDomain indicates an expected call of GetDomain.
func (mr *MockCmdServerMockRecorder) GetDomain(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDomain", reflect.TypeOf((*MockCmdServer)(nil).GetDomain), arg0, arg1)
}
// GetDomainDirtyRateStats mocks base method.
func (m *MockCmdServer) GetDomainDirtyRateStats(arg0 context.Context, arg1 *EmptyRequest) (*DirtyRateStatsResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetDomainDirtyRateStats", arg0, arg1)
ret0, _ := ret[0].(*DirtyRateStatsResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetDomainDirtyRateStats indicates an expected call of GetDomainDirtyRateStats.
func (mr *MockCmdServerMockRecorder) GetDomainDirtyRateStats(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDomainDirtyRateStats", reflect.TypeOf((*MockCmdServer)(nil).GetDomainDirtyRateStats), arg0, arg1)
}
// GetDomainStats mocks base method.
func (m *MockCmdServer) GetDomainStats(arg0 context.Context, arg1 *EmptyRequest) (*DomainStatsResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetDomainStats", arg0, arg1)
ret0, _ := ret[0].(*DomainStatsResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetDomainStats indicates an expected call of GetDomainStats.
func (mr *MockCmdServerMockRecorder) GetDomainStats(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDomainStats", reflect.TypeOf((*MockCmdServer)(nil).GetDomainStats), arg0, arg1)
}
// GetFilesystems mocks base method.
func (m *MockCmdServer) GetFilesystems(arg0 context.Context, arg1 *EmptyRequest) (*GuestFilesystemsResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetFilesystems", arg0, arg1)
ret0, _ := ret[0].(*GuestFilesystemsResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetFilesystems indicates an expected call of GetFilesystems.
func (mr *MockCmdServerMockRecorder) GetFilesystems(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFilesystems", reflect.TypeOf((*MockCmdServer)(nil).GetFilesystems), arg0, arg1)
}
// GetGuestInfo mocks base method.
func (m *MockCmdServer) GetGuestInfo(arg0 context.Context, arg1 *EmptyRequest) (*GuestInfoResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetGuestInfo", arg0, arg1)
ret0, _ := ret[0].(*GuestInfoResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetGuestInfo indicates an expected call of GetGuestInfo.
func (mr *MockCmdServerMockRecorder) GetGuestInfo(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGuestInfo", reflect.TypeOf((*MockCmdServer)(nil).GetGuestInfo), arg0, arg1)
}
// GetLaunchMeasurement mocks base method.
func (m *MockCmdServer) GetLaunchMeasurement(arg0 context.Context, arg1 *VMIRequest) (*LaunchMeasurementResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetLaunchMeasurement", arg0, arg1)
ret0, _ := ret[0].(*LaunchMeasurementResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetLaunchMeasurement indicates an expected call of GetLaunchMeasurement.
func (mr *MockCmdServerMockRecorder) GetLaunchMeasurement(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLaunchMeasurement", reflect.TypeOf((*MockCmdServer)(nil).GetLaunchMeasurement), arg0, arg1)
}
// GetQemuVersion mocks base method.
func (m *MockCmdServer) GetQemuVersion(arg0 context.Context, arg1 *EmptyRequest) (*QemuVersionResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetQemuVersion", arg0, arg1)
ret0, _ := ret[0].(*QemuVersionResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetQemuVersion indicates an expected call of GetQemuVersion.
func (mr *MockCmdServerMockRecorder) GetQemuVersion(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetQemuVersion", reflect.TypeOf((*MockCmdServer)(nil).GetQemuVersion), arg0, arg1)
}
// GetSEVInfo mocks base method.
func (m *MockCmdServer) GetSEVInfo(arg0 context.Context, arg1 *EmptyRequest) (*SEVInfoResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetSEVInfo", arg0, arg1)
ret0, _ := ret[0].(*SEVInfoResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetSEVInfo indicates an expected call of GetSEVInfo.
func (mr *MockCmdServerMockRecorder) GetSEVInfo(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSEVInfo", reflect.TypeOf((*MockCmdServer)(nil).GetSEVInfo), arg0, arg1)
}
// GetScreenshot mocks base method.
func (m *MockCmdServer) GetScreenshot(arg0 context.Context, arg1 *VMIRequest) (*ScreenshotResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetScreenshot", arg0, arg1)
ret0, _ := ret[0].(*ScreenshotResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetScreenshot indicates an expected call of GetScreenshot.
func (mr *MockCmdServerMockRecorder) GetScreenshot(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetScreenshot", reflect.TypeOf((*MockCmdServer)(nil).GetScreenshot), arg0, arg1)
}
// GetUsers mocks base method.
func (m *MockCmdServer) GetUsers(arg0 context.Context, arg1 *EmptyRequest) (*GuestUserListResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetUsers", arg0, arg1)
ret0, _ := ret[0].(*GuestUserListResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetUsers indicates an expected call of GetUsers.
func (mr *MockCmdServerMockRecorder) GetUsers(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUsers", reflect.TypeOf((*MockCmdServer)(nil).GetUsers), arg0, arg1)
}
// GuestPing mocks base method.
func (m *MockCmdServer) GuestPing(arg0 context.Context, arg1 *GuestPingRequest) (*GuestPingResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GuestPing", arg0, arg1)
ret0, _ := ret[0].(*GuestPingResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GuestPing indicates an expected call of GuestPing.
func (mr *MockCmdServerMockRecorder) GuestPing(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GuestPing", reflect.TypeOf((*MockCmdServer)(nil).GuestPing), arg0, arg1)
}
// HotplugHostDevices mocks base method.
func (m *MockCmdServer) HotplugHostDevices(arg0 context.Context, arg1 *VMIRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "HotplugHostDevices", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// HotplugHostDevices indicates an expected call of HotplugHostDevices.
func (mr *MockCmdServerMockRecorder) HotplugHostDevices(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HotplugHostDevices", reflect.TypeOf((*MockCmdServer)(nil).HotplugHostDevices), arg0, arg1)
}
// InjectLaunchSecret mocks base method.
func (m *MockCmdServer) InjectLaunchSecret(arg0 context.Context, arg1 *InjectLaunchSecretRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InjectLaunchSecret", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// InjectLaunchSecret indicates an expected call of InjectLaunchSecret.
func (mr *MockCmdServerMockRecorder) InjectLaunchSecret(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InjectLaunchSecret", reflect.TypeOf((*MockCmdServer)(nil).InjectLaunchSecret), arg0, arg1)
}
// KillVirtualMachine mocks base method.
func (m *MockCmdServer) KillVirtualMachine(arg0 context.Context, arg1 *VMIRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "KillVirtualMachine", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// KillVirtualMachine indicates an expected call of KillVirtualMachine.
func (mr *MockCmdServerMockRecorder) KillVirtualMachine(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KillVirtualMachine", reflect.TypeOf((*MockCmdServer)(nil).KillVirtualMachine), arg0, arg1)
}
// MigrateVirtualMachine mocks base method.
func (m *MockCmdServer) MigrateVirtualMachine(arg0 context.Context, arg1 *MigrationRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "MigrateVirtualMachine", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// MigrateVirtualMachine indicates an expected call of MigrateVirtualMachine.
func (mr *MockCmdServerMockRecorder) MigrateVirtualMachine(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MigrateVirtualMachine", reflect.TypeOf((*MockCmdServer)(nil).MigrateVirtualMachine), arg0, arg1)
}
// PauseVirtualMachine mocks base method.
func (m *MockCmdServer) PauseVirtualMachine(arg0 context.Context, arg1 *VMIRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PauseVirtualMachine", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// PauseVirtualMachine indicates an expected call of PauseVirtualMachine.
func (mr *MockCmdServerMockRecorder) PauseVirtualMachine(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PauseVirtualMachine", reflect.TypeOf((*MockCmdServer)(nil).PauseVirtualMachine), arg0, arg1)
}
// Ping mocks base method.
func (m *MockCmdServer) Ping(arg0 context.Context, arg1 *EmptyRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Ping", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Ping indicates an expected call of Ping.
func (mr *MockCmdServerMockRecorder) Ping(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ping", reflect.TypeOf((*MockCmdServer)(nil).Ping), arg0, arg1)
}
// ResetVirtualMachine mocks base method.
func (m *MockCmdServer) ResetVirtualMachine(arg0 context.Context, arg1 *VMIRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ResetVirtualMachine", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ResetVirtualMachine indicates an expected call of ResetVirtualMachine.
func (mr *MockCmdServerMockRecorder) ResetVirtualMachine(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetVirtualMachine", reflect.TypeOf((*MockCmdServer)(nil).ResetVirtualMachine), arg0, arg1)
}
// ShutdownVirtualMachine mocks base method.
func (m *MockCmdServer) ShutdownVirtualMachine(arg0 context.Context, arg1 *VMIRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ShutdownVirtualMachine", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ShutdownVirtualMachine indicates an expected call of ShutdownVirtualMachine.
func (mr *MockCmdServerMockRecorder) ShutdownVirtualMachine(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShutdownVirtualMachine", reflect.TypeOf((*MockCmdServer)(nil).ShutdownVirtualMachine), arg0, arg1)
}
// SignalTargetPodCleanup mocks base method.
func (m *MockCmdServer) SignalTargetPodCleanup(arg0 context.Context, arg1 *VMIRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SignalTargetPodCleanup", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SignalTargetPodCleanup indicates an expected call of SignalTargetPodCleanup.
func (mr *MockCmdServerMockRecorder) SignalTargetPodCleanup(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SignalTargetPodCleanup", reflect.TypeOf((*MockCmdServer)(nil).SignalTargetPodCleanup), arg0, arg1)
}
// SoftRebootVirtualMachine mocks base method.
func (m *MockCmdServer) SoftRebootVirtualMachine(arg0 context.Context, arg1 *VMIRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SoftRebootVirtualMachine", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SoftRebootVirtualMachine indicates an expected call of SoftRebootVirtualMachine.
func (mr *MockCmdServerMockRecorder) SoftRebootVirtualMachine(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SoftRebootVirtualMachine", reflect.TypeOf((*MockCmdServer)(nil).SoftRebootVirtualMachine), arg0, arg1)
}
// SyncMigrationTarget mocks base method.
func (m *MockCmdServer) SyncMigrationTarget(arg0 context.Context, arg1 *VMIRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SyncMigrationTarget", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SyncMigrationTarget indicates an expected call of SyncMigrationTarget.
func (mr *MockCmdServerMockRecorder) SyncMigrationTarget(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncMigrationTarget", reflect.TypeOf((*MockCmdServer)(nil).SyncMigrationTarget), arg0, arg1)
}
// SyncVirtualMachine mocks base method.
func (m *MockCmdServer) SyncVirtualMachine(arg0 context.Context, arg1 *VMIRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SyncVirtualMachine", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SyncVirtualMachine indicates an expected call of SyncVirtualMachine.
func (mr *MockCmdServerMockRecorder) SyncVirtualMachine(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncVirtualMachine", reflect.TypeOf((*MockCmdServer)(nil).SyncVirtualMachine), arg0, arg1)
}
// SyncVirtualMachineCPUs mocks base method.
func (m *MockCmdServer) SyncVirtualMachineCPUs(arg0 context.Context, arg1 *VMIRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SyncVirtualMachineCPUs", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SyncVirtualMachineCPUs indicates an expected call of SyncVirtualMachineCPUs.
func (mr *MockCmdServerMockRecorder) SyncVirtualMachineCPUs(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncVirtualMachineCPUs", reflect.TypeOf((*MockCmdServer)(nil).SyncVirtualMachineCPUs), arg0, arg1)
}
// SyncVirtualMachineMemory mocks base method.
func (m *MockCmdServer) SyncVirtualMachineMemory(arg0 context.Context, arg1 *VMIRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SyncVirtualMachineMemory", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SyncVirtualMachineMemory indicates an expected call of SyncVirtualMachineMemory.
func (mr *MockCmdServerMockRecorder) SyncVirtualMachineMemory(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncVirtualMachineMemory", reflect.TypeOf((*MockCmdServer)(nil).SyncVirtualMachineMemory), arg0, arg1)
}
// UnfreezeVirtualMachine mocks base method.
func (m *MockCmdServer) UnfreezeVirtualMachine(arg0 context.Context, arg1 *VMIRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UnfreezeVirtualMachine", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UnfreezeVirtualMachine indicates an expected call of UnfreezeVirtualMachine.
func (mr *MockCmdServerMockRecorder) UnfreezeVirtualMachine(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnfreezeVirtualMachine", reflect.TypeOf((*MockCmdServer)(nil).UnfreezeVirtualMachine), arg0, arg1)
}
// UnpauseVirtualMachine mocks base method.
func (m *MockCmdServer) UnpauseVirtualMachine(arg0 context.Context, arg1 *VMIRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UnpauseVirtualMachine", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UnpauseVirtualMachine indicates an expected call of UnpauseVirtualMachine.
func (mr *MockCmdServerMockRecorder) UnpauseVirtualMachine(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnpauseVirtualMachine", reflect.TypeOf((*MockCmdServer)(nil).UnpauseVirtualMachine), arg0, arg1)
}
// VirtualMachineMemoryDump mocks base method.
func (m *MockCmdServer) VirtualMachineMemoryDump(arg0 context.Context, arg1 *MemoryDumpRequest) (*Response, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "VirtualMachineMemoryDump", arg0, arg1)
ret0, _ := ret[0].(*Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// VirtualMachineMemoryDump indicates an expected call of VirtualMachineMemoryDump.
func (mr *MockCmdServerMockRecorder) VirtualMachineMemoryDump(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VirtualMachineMemoryDump", reflect.TypeOf((*MockCmdServer)(nil).VirtualMachineMemoryDump), arg0, arg1)
}
// Code generated by MockGen. DO NOT EDIT.
// Source: manager.go
//
// Generated by this command:
//
// mockgen -source manager.go -package=hooks -destination=generated_mock_manager.go
//
// Package hooks is a generated GoMock package.
package hooks
import (
reflect "reflect"
time "time"
gomock "go.uber.org/mock/gomock"
v1 "kubevirt.io/api/core/v1"
cloudinit "kubevirt.io/kubevirt/pkg/cloud-init"
api "kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
)
// MockManager is a mock of Manager interface.
type MockManager struct {
ctrl *gomock.Controller
recorder *MockManagerMockRecorder
isgomock struct{}
}
// MockManagerMockRecorder is the mock recorder for MockManager.
type MockManagerMockRecorder struct {
mock *MockManager
}
// NewMockManager creates a new mock instance.
func NewMockManager(ctrl *gomock.Controller) *MockManager {
mock := &MockManager{ctrl: ctrl}
mock.recorder = &MockManagerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockManager) EXPECT() *MockManagerMockRecorder {
return m.recorder
}
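// Illustrative only (not part of the generated file): a test can stub the
// hook manager before exercising code that defines a domain. The returned
// XML string below is a hypothetical value.
//
//	manager := NewMockManager(ctrl)
//	manager.EXPECT().
//		OnDefineDomain(gomock.Any(), gomock.Any()).
//		Return("<domain/>", nil)
//	manager.EXPECT().Shutdown().Return(nil)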
// Collect mocks base method.
func (m *MockManager) Collect(arg0 uint, arg1 time.Duration) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Collect", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// Collect indicates an expected call of Collect.
func (mr *MockManagerMockRecorder) Collect(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Collect", reflect.TypeOf((*MockManager)(nil).Collect), arg0, arg1)
}
// OnDefineDomain mocks base method.
func (m *MockManager) OnDefineDomain(arg0 *api.DomainSpec, arg1 *v1.VirtualMachineInstance) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "OnDefineDomain", arg0, arg1)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// OnDefineDomain indicates an expected call of OnDefineDomain.
func (mr *MockManagerMockRecorder) OnDefineDomain(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnDefineDomain", reflect.TypeOf((*MockManager)(nil).OnDefineDomain), arg0, arg1)
}
// PreCloudInitIso mocks base method.
func (m *MockManager) PreCloudInitIso(arg0 *v1.VirtualMachineInstance, arg1 *cloudinit.CloudInitData) (*cloudinit.CloudInitData, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PreCloudInitIso", arg0, arg1)
ret0, _ := ret[0].(*cloudinit.CloudInitData)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// PreCloudInitIso indicates an expected call of PreCloudInitIso.
func (mr *MockManagerMockRecorder) PreCloudInitIso(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PreCloudInitIso", reflect.TypeOf((*MockManager)(nil).PreCloudInitIso), arg0, arg1)
}
// Shutdown mocks base method.
func (m *MockManager) Shutdown() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Shutdown")
ret0, _ := ret[0].(error)
return ret0
}
// Shutdown indicates an expected call of Shutdown.
func (mr *MockManagerMockRecorder) Shutdown() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockManager)(nil).Shutdown))
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package hooks
import (
"encoding/json"
k8sv1 "k8s.io/api/core/v1"
v1 "kubevirt.io/api/core/v1"
)
const HookSidecarListAnnotationName = "hooks.kubevirt.io/hookSidecars"
const HookSocketsSharedDirectory = "/var/run/kubevirt-hooks"
const ContainerNameEnvVar = "CONTAINER_NAME"
type HookSidecarList []HookSidecar
type ConfigMap struct {
Name string `json:"name"`
Key string `json:"key"`
HookPath string `json:"hookPath"`
}
type PVC struct {
Name string `json:"name"`
VolumePath string `json:"volumePath"`
SharedComputePath string `json:"sharedComputePath"`
}
type HookSidecar struct {
Image string `json:"image,omitempty"`
ImagePullPolicy k8sv1.PullPolicy `json:"imagePullPolicy"`
Command []string `json:"command,omitempty"`
Args []string `json:"args,omitempty"`
ConfigMap *ConfigMap `json:"configMap,omitempty"`
PVC *PVC `json:"pvc,omitempty"`
DownwardAPI v1.NetworkBindingDownwardAPIType `json:"-"`
}
func UnmarshalHookSidecarList(vmiObject *v1.VirtualMachineInstance) (HookSidecarList, error) {
hookSidecarList := make(HookSidecarList, 0)
if rawRequestedHookSidecarList, requestedHookSidecarListDefined := vmiObject.GetAnnotations()[HookSidecarListAnnotationName]; requestedHookSidecarListDefined {
if err := json.Unmarshal([]byte(rawRequestedHookSidecarList), &hookSidecarList); err != nil {
return nil, err
}
}
return hookSidecarList, nil
}
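// A minimal sketch of how a VMI might request a hook sidecar through the
// annotation parsed above; the image reference is hypothetical:
//
//	vmi.SetAnnotations(map[string]string{
//		HookSidecarListAnnotationName: `[{"image": "registry.example/smbios-sidecar:latest", "imagePullPolicy": "IfNotPresent"}]`,
//	})
//	sidecars, err := UnmarshalHookSidecarList(vmi)
//	// sidecars now contains a single HookSidecar entry for that image.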
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: api_info.proto
/*
Package info is a generated protocol buffer package.
It is generated from these files:
api_info.proto
It has these top-level messages:
InfoParams
InfoResult
HookPoint
*/
package info
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type InfoParams struct {
}
func (m *InfoParams) Reset() { *m = InfoParams{} }
func (m *InfoParams) String() string { return proto.CompactTextString(m) }
func (*InfoParams) ProtoMessage() {}
func (*InfoParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type InfoResult struct {
// name of the hook used by virt-launcher to compare it with requested hooks
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// hookPoints is a list of subscribed hook points
HookPoints []*HookPoint `protobuf:"bytes,3,rep,name=hookPoints" json:"hookPoints,omitempty"`
// versions is a list of implemented hook Callbacks service versions
Versions []string `protobuf:"bytes,4,rep,name=versions" json:"versions,omitempty"`
}
func (m *InfoResult) Reset() { *m = InfoResult{} }
func (m *InfoResult) String() string { return proto.CompactTextString(m) }
func (*InfoResult) ProtoMessage() {}
func (*InfoResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *InfoResult) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *InfoResult) GetHookPoints() []*HookPoint {
if m != nil {
return m.HookPoints
}
return nil
}
func (m *InfoResult) GetVersions() []string {
if m != nil {
return m.Versions
}
return nil
}
type HookPoint struct {
// name represents name of the subscribed hook point
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// priority is used to sort hooks prior to their execution (second key is the name)
Priority int32 `protobuf:"varint,2,opt,name=priority" json:"priority,omitempty"`
}
func (m *HookPoint) Reset() { *m = HookPoint{} }
func (m *HookPoint) String() string { return proto.CompactTextString(m) }
func (*HookPoint) ProtoMessage() {}
func (*HookPoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *HookPoint) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *HookPoint) GetPriority() int32 {
if m != nil {
return m.Priority
}
return 0
}
func init() {
proto.RegisterType((*InfoParams)(nil), "kubevirt.hooks.info.InfoParams")
proto.RegisterType((*InfoResult)(nil), "kubevirt.hooks.info.InfoResult")
proto.RegisterType((*HookPoint)(nil), "kubevirt.hooks.info.HookPoint")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Info service
type InfoClient interface {
Info(ctx context.Context, in *InfoParams, opts ...grpc.CallOption) (*InfoResult, error)
}
type infoClient struct {
cc *grpc.ClientConn
}
func NewInfoClient(cc *grpc.ClientConn) InfoClient {
return &infoClient{cc}
}
func (c *infoClient) Info(ctx context.Context, in *InfoParams, opts ...grpc.CallOption) (*InfoResult, error) {
out := new(InfoResult)
err := grpc.Invoke(ctx, "/kubevirt.hooks.info.Info/Info", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Info service
type InfoServer interface {
Info(context.Context, *InfoParams) (*InfoResult, error)
}
func RegisterInfoServer(s *grpc.Server, srv InfoServer) {
s.RegisterService(&_Info_serviceDesc, srv)
}
func _Info_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(InfoParams)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(InfoServer).Info(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.hooks.info.Info/Info",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(InfoServer).Info(ctx, req.(*InfoParams))
}
return interceptor(ctx, in, info, handler)
}
var _Info_serviceDesc = grpc.ServiceDesc{
ServiceName: "kubevirt.hooks.info.Info",
HandlerType: (*InfoServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Info",
Handler: _Info_Info_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "api_info.proto",
}
func init() { proto.RegisterFile("api_info.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 205 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0x2c, 0xc8, 0x8c,
0xcf, 0xcc, 0x4b, 0xcb, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xce, 0x2e, 0x4d, 0x4a,
0x2d, 0xcb, 0x2c, 0x2a, 0xd1, 0xcb, 0xc8, 0xcf, 0xcf, 0x2e, 0xd6, 0x03, 0x49, 0x29, 0xf1, 0x70,
0x71, 0x79, 0xe6, 0xa5, 0xe5, 0x07, 0x24, 0x16, 0x25, 0xe6, 0x16, 0x2b, 0xd5, 0x40, 0x78, 0x41,
0xa9, 0xc5, 0xa5, 0x39, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0x79, 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a,
0x8c, 0x1a, 0x9c, 0x41, 0x60, 0xb6, 0x90, 0x1d, 0x17, 0x17, 0x48, 0x77, 0x40, 0x7e, 0x66, 0x5e,
0x49, 0xb1, 0x04, 0xb3, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x9c, 0x1e, 0x16, 0x93, 0xf5, 0x3c, 0x60,
0xca, 0x82, 0x90, 0x74, 0x08, 0x49, 0x71, 0x71, 0x94, 0xa5, 0x16, 0x15, 0x67, 0xe6, 0xe7, 0x15,
0x4b, 0xb0, 0x28, 0x30, 0x6b, 0x70, 0x06, 0xc1, 0xf9, 0x4a, 0xd6, 0x5c, 0x9c, 0x70, 0x4d, 0x58,
0x2d, 0x97, 0xe2, 0xe2, 0x28, 0x28, 0xca, 0xcc, 0x2f, 0xca, 0x2c, 0xa9, 0x94, 0x60, 0x52, 0x60,
0xd4, 0x60, 0x0d, 0x82, 0xf3, 0x8d, 0x02, 0xb8, 0x58, 0x40, 0x4e, 0x17, 0xf2, 0x80, 0xd2, 0xf2,
0x58, 0x1d, 0x85, 0xf0, 0xab, 0x14, 0x6e, 0x05, 0x10, 0xef, 0x27, 0xb1, 0x81, 0x83, 0xcd, 0x18,
0x10, 0x00, 0x00, 0xff, 0xff, 0xe2, 0x38, 0x98, 0xcf, 0x48, 0x01, 0x00, 0x00,
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package hooks
import (
"context"
"encoding/json"
"encoding/xml"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"time"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
cloudinit "kubevirt.io/kubevirt/pkg/cloud-init"
hooksInfo "kubevirt.io/kubevirt/pkg/hooks/info"
hooksV1alpha1 "kubevirt.io/kubevirt/pkg/hooks/v1alpha1"
hooksV1alpha2 "kubevirt.io/kubevirt/pkg/hooks/v1alpha2"
hooksV1alpha3 "kubevirt.io/kubevirt/pkg/hooks/v1alpha3"
grpcutil "kubevirt.io/kubevirt/pkg/util/net/grpc"
virtwrapApi "kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
)
//go:generate mockgen -source $GOFILE -package=$GOPACKAGE -destination=generated_mock_$GOFILE
const dialSockErr = "Failed to dial hook socket: %s"
type callBackClient struct {
SocketPath string
Version string
subscribedHookPoints []*hooksInfo.HookPoint
}
var manager Manager
var once sync.Once
type (
Manager interface {
Collect(uint, time.Duration) error
OnDefineDomain(*virtwrapApi.DomainSpec, *v1.VirtualMachineInstance) (string, error)
PreCloudInitIso(*v1.VirtualMachineInstance, *cloudinit.CloudInitData) (*cloudinit.CloudInitData, error)
Shutdown() error
}
hookManager struct {
CallbacksPerHookPoint map[string][]*callBackClient
hookSocketSharedDirectory string
}
)
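// GetManager lazily initializes and returns the process-wide singleton Manager,
// backed by the shared hook socket directory.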
func GetManager() Manager {
once.Do(func() {
manager = newManager(HookSocketsSharedDirectory)
})
return manager
}
func newManager(baseDir string) *hookManager {
return &hookManager{CallbacksPerHookPoint: make(map[string][]*callBackClient), hookSocketSharedDirectory: baseDir}
}
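// A brief usage sketch for Collect, assuming a caller (such as virt-launcher)
// that already knows how many hook sidecars were requested; the values here are
// illustrative:
//
//	if err := GetManager().Collect(2, 10*time.Second); err != nil {
//		// the sidecar sockets did not all appear within the timeout
//	}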
func (m *hookManager) Collect(numberOfRequestedHookSidecars uint, timeout time.Duration) error {
callbacksPerHookPoint, err := m.collectSideCarSockets(numberOfRequestedHookSidecars, timeout)
if err != nil {
return err
}
log.Log.Info("Collected all requested hook sidecar sockets")
sortCallbacksPerHookPoint(callbacksPerHookPoint)
log.Log.Infof("Sorted all collected sidecar sockets per hook point based on their priority and name: %v", callbacksPerHookPoint)
m.CallbacksPerHookPoint = callbacksPerHookPoint
return nil
}
// TODO: Handle sockets in parallel, when a socket appears, run a goroutine trying to read Info from it
func (m *hookManager) collectSideCarSockets(numberOfRequestedHookSidecars uint, timeout time.Duration) (map[string][]*callBackClient, error) {
callbacksPerHookPoint := make(map[string][]*callBackClient)
processedSockets := make(map[string]bool)
timeoutCh := time.After(timeout)
ticker := time.NewTicker(300 * time.Millisecond)
defer ticker.Stop()
for uint(len(processedSockets)) < numberOfRequestedHookSidecars {
entries, err := os.ReadDir(m.hookSocketSharedDirectory)
if err != nil {
return nil, err
}
for _, entry := range entries {
if !entry.IsDir() {
continue
}
subPath := filepath.Join(m.hookSocketSharedDirectory, entry.Name())
subEntries, err := os.ReadDir(subPath)
if err != nil {
return nil, err
}
for _, subEntry := range subEntries {
if subEntry.IsDir() {
continue
}
if _, processed := processedSockets[subEntry.Name()]; processed {
continue
}
notReady, err := handleSidecarSocket(filepath.Join(subPath, subEntry.Name()), callbacksPerHookPoint)
if err != nil {
return nil, err
}
if notReady {
continue
}
processedSockets[subEntry.Name()] = true
}
}
select {
case <-timeoutCh:
return nil, fmt.Errorf("Failed to collect all expected sidecar hook sockets within given timeout")
case <-ticker.C:
}
}
return callbacksPerHookPoint, nil
}
func handleSidecarSocket(filePath string, callbacksPerHookPoint map[string][]*callBackClient) (bool, error) {
callBackClient, notReady, err := processSideCarSocket(filePath)
if err != nil {
log.Log.Reason(err).Infof("Failed to process sidecar socket: %s", filePath)
return false, err
}
if notReady {
log.Log.Infof("Sidecar server might not be ready yet: %s", filePath)
return true, nil
}
for _, subscribedHookPoint := range callBackClient.subscribedHookPoints {
callbacksPerHookPoint[subscribedHookPoint.GetName()] = append(callbacksPerHookPoint[subscribedHookPoint.GetName()], callBackClient)
}
return false, nil
}
func processSideCarSocket(socketPath string) (*callBackClient, bool, error) {
conn, err := grpcutil.DialSocketWithTimeout(socketPath, 1)
if err != nil {
log.Log.Reason(err).Infof(dialSockErr, socketPath)
return nil, true, nil
}
defer conn.Close()
infoClient := hooksInfo.NewInfoClient(conn)
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
info, err := infoClient.Info(ctx, &hooksInfo.InfoParams{})
if err != nil {
return nil, false, err
}
versionsSet := make(map[string]bool)
for _, version := range info.GetVersions() {
versionsSet[version] = true
}
// The order matters. We should match newer versions first.
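// For example, a sidecar advertising both the v1alpha1 and v1alpha3 versions is
// served via v1alpha3, while one advertising only v1alpha1 falls back to v1alpha1.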
supportedVersions := []string{
hooksV1alpha3.Version,
hooksV1alpha2.Version,
hooksV1alpha1.Version,
}
for _, version := range supportedVersions {
if _, found := versionsSet[version]; found {
return &callBackClient{
SocketPath: socketPath,
Version: version,
subscribedHookPoints: info.GetHookPoints(),
}, false, nil
}
}
return nil, false,
fmt.Errorf("hook sidecar does not expose a supported version; exposed versions: %v, supported versions: %v",
info.GetVersions(), supportedVersions)
}
func sortCallbacksPerHookPoint(callbacksPerHookPoint map[string][]*callBackClient) {
for _, callbacks := range callbacksPerHookPoint {
for _, callback := range callbacks {
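// Order each callback's hook points by descending priority, breaking ties by
// name; e.g. (priority 10, "b") sorts before (5, "a"), which sorts before (5, "c").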
sort.Slice(callback.subscribedHookPoints, func(i, j int) bool {
if callback.subscribedHookPoints[i].Priority == callback.subscribedHookPoints[j].Priority {
return strings.Compare(callback.subscribedHookPoints[i].Name, callback.subscribedHookPoints[j].Name) < 0
}
return callback.subscribedHookPoints[i].Priority > callback.subscribedHookPoints[j].Priority
})
}
}
}
func (m *hookManager) OnDefineDomain(domainSpec *virtwrapApi.DomainSpec, vmi *v1.VirtualMachineInstance) (string, error) {
domainSpecXML, err := xml.MarshalIndent(domainSpec, "", "\t")
if err != nil {
return "", fmt.Errorf("Failed to marshal domain spec: %v", domainSpec)
}
callbacks, found := m.CallbacksPerHookPoint[hooksInfo.OnDefineDomainHookPointName]
if !found {
return string(domainSpecXML), nil
}
vmiJSON, err := json.Marshal(vmi)
if err != nil {
return "", fmt.Errorf("failed to marshal VMI spec: %v, err: %v", vmi, err)
}
for _, callback := range callbacks {
domainSpecXML, err = m.onDefineDomainCallback(callback, domainSpecXML, vmiJSON)
if err != nil {
return "", err
}
}
return string(domainSpecXML), nil
}
func (m *hookManager) onDefineDomainCallback(callback *callBackClient, domainSpecXML, vmiJSON []byte) ([]byte, error) {
conn, err := grpcutil.DialSocketWithTimeout(callback.SocketPath, 1)
if err != nil {
log.Log.Reason(err).Errorf(dialSockErr, callback.SocketPath)
return nil, err
}
defer conn.Close()
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
switch callback.Version {
case hooksV1alpha1.Version:
client := hooksV1alpha1.NewCallbacksClient(conn)
result, err := client.OnDefineDomain(ctx, &hooksV1alpha1.OnDefineDomainParams{
DomainXML: domainSpecXML,
Vmi: vmiJSON,
})
if err != nil {
log.Log.Reason(err).Error("Failed to call OnDefineDomain")
return nil, err
}
domainSpecXML = result.GetDomainXML()
case hooksV1alpha2.Version:
client := hooksV1alpha2.NewCallbacksClient(conn)
result, err := client.OnDefineDomain(ctx, &hooksV1alpha2.OnDefineDomainParams{
DomainXML: domainSpecXML,
Vmi: vmiJSON,
})
if err != nil {
log.Log.Reason(err).Error("Failed to call OnDefineDomain")
return nil, err
}
domainSpecXML = result.GetDomainXML()
case hooksV1alpha3.Version:
client := hooksV1alpha3.NewCallbacksClient(conn)
result, err := client.OnDefineDomain(ctx, &hooksV1alpha3.OnDefineDomainParams{
DomainXML: domainSpecXML,
Vmi: vmiJSON,
})
if err != nil {
log.Log.Reason(err).Error("Failed to call OnDefineDomain")
return nil, err
}
domainSpecXML = result.GetDomainXML()
default:
log.Log.Errorf("Unsupported callback version: %s", callback.Version)
}
return domainSpecXML, nil
}
func preCloudInitIsoDataToJSON(vmi *v1.VirtualMachineInstance, cloudInitData *cloudinit.CloudInitData) ([]byte, []byte, []byte, error) {
vmiJSON, err := json.Marshal(vmi)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to marshal VMI spec, err: %v", err)
}
// To remain backward compatible with sidecar hooks that still expect to receive
// the cloud-init data as a CloudInitNoCloudSource object, construct a
// CloudInitNoCloudSource object from the user and network data of the
// cloudInitData object.
cloudInitNoCloudSource := v1.CloudInitNoCloudSource{
UserData: cloudInitData.UserData,
NetworkData: cloudInitData.NetworkData,
}
cloudInitNoCloudSourceJSON, err := json.Marshal(cloudInitNoCloudSource)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to marshal CloudInitNoCloudSource: %v, err: %v", cloudInitNoCloudSource, err)
}
cloudInitDataJSON, err := json.Marshal(cloudInitData)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to marshal CloudInitData, err: %v", err)
}
return cloudInitDataJSON, cloudInitNoCloudSourceJSON, vmiJSON, nil
}
func preCloudInitIsoValidateResult(dataSource cloudinit.DataSourceType, initData, noCloudSource []byte) (*cloudinit.CloudInitData, error) {
var resultData *cloudinit.CloudInitData
if err := json.Unmarshal(initData, &resultData); err != nil {
log.Log.Reason(err).Error("Failed to unmarshal CloudInitData result")
return nil, err
}
if !cloudinit.IsValidCloudInitData(resultData) {
// Be backwards compatible for hook sidecars still working on CloudInitNoCloudSource objects instead of CloudInitData
var resultNoCloudSourceData *v1.CloudInitNoCloudSource
if err := json.Unmarshal(noCloudSource, &resultNoCloudSourceData); err != nil {
log.Log.Reason(err).Error("Failed to unmarshal CloudInitNoCloudSource result")
return nil, err
}
resultData = &cloudinit.CloudInitData{
DataSource: dataSource,
UserData: resultNoCloudSourceData.UserData,
NetworkData: resultNoCloudSourceData.NetworkData,
}
}
return resultData, nil
}
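// PreCloudInitIso lets a subscribed hook sidecar mutate the cloud-init data
// before the ISO is generated. Note that the result of the first callback with
// a supported version is returned; any remaining callbacks are not invoked.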
func (m *hookManager) PreCloudInitIso(vmi *v1.VirtualMachineInstance, cloudInitData *cloudinit.CloudInitData) (*cloudinit.CloudInitData, error) {
callbacks, found := m.CallbacksPerHookPoint[hooksInfo.PreCloudInitIsoHookPointName]
if !found {
return cloudInitData, nil
}
cloudInitDataJSON, cloudInitNoCloudSourceJSON, vmiJSON, err := preCloudInitIsoDataToJSON(vmi, cloudInitData)
if err != nil {
log.Log.Reason(err).Error("Failed to run PreCloudInitIso")
return cloudInitData, err
}
for _, callback := range callbacks {
switch callback.Version {
case hooksV1alpha2.Version:
conn, err := grpcutil.DialSocketWithTimeout(callback.SocketPath, 1)
if err != nil {
log.Log.Reason(err).Errorf(dialSockErr, callback.SocketPath)
return cloudInitData, err
}
defer conn.Close()
client := hooksV1alpha2.NewCallbacksClient(conn)
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
result, err := client.PreCloudInitIso(ctx, &hooksV1alpha2.PreCloudInitIsoParams{
CloudInitData: cloudInitDataJSON,
CloudInitNoCloudSource: cloudInitNoCloudSourceJSON,
Vmi: vmiJSON,
})
if err != nil {
log.Log.Reason(err).Error("Failed to call PreCloudInitIso")
return cloudInitData, err
}
return preCloudInitIsoValidateResult(cloudInitData.DataSource, result.GetCloudInitData(), result.GetCloudInitNoCloudSource())
case hooksV1alpha3.Version:
conn, err := grpcutil.DialSocketWithTimeout(callback.SocketPath, 1)
if err != nil {
log.Log.Reason(err).Errorf(dialSockErr, callback.SocketPath)
return cloudInitData, err
}
defer conn.Close()
client := hooksV1alpha3.NewCallbacksClient(conn)
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
result, err := client.PreCloudInitIso(ctx, &hooksV1alpha3.PreCloudInitIsoParams{
CloudInitData: cloudInitDataJSON,
CloudInitNoCloudSource: cloudInitNoCloudSourceJSON,
Vmi: vmiJSON,
})
if err != nil {
log.Log.Reason(err).Error("Failed to call PreCloudInitIso")
return cloudInitData, err
}
return preCloudInitIsoValidateResult(cloudInitData.DataSource, result.GetCloudInitData(), result.GetCloudInitNoCloudSource())
default:
log.Log.Errorf("Unsupported callback version: %s", callback.Version)
}
}
return cloudInitData, nil
}
func (m *hookManager) Shutdown() error {
callbacks, found := m.CallbacksPerHookPoint[hooksInfo.ShutdownHookPointName]
if !found {
return nil
}
for _, callback := range callbacks {
switch callback.Version {
case hooksV1alpha3.Version:
conn, err := grpcutil.DialSocketWithTimeout(callback.SocketPath, 1)
if err != nil {
log.Log.Reason(err).Error("Failed to run Shutdown")
return err
}
defer conn.Close()
client := hooksV1alpha3.NewCallbacksClient(conn)
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
if _, err := client.Shutdown(ctx, &hooksV1alpha3.ShutdownParams{}); err != nil {
log.Log.Reason(err).Error("Failed to run Shutdown")
return err
}
default:
log.Log.Errorf("Unsupported callback version: %s", callback.Version)
}
}
return nil
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: api_v1alpha1.proto
/*
Package v1alpha1 is a generated protocol buffer package.
It is generated from these files:
api_v1alpha1.proto
It has these top-level messages:
OnDefineDomainParams
OnDefineDomainResult
*/
package v1alpha1
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type OnDefineDomainParams struct {
// domainXML is original libvirt domain specification
DomainXML []byte `protobuf:"bytes,1,opt,name=domainXML,proto3" json:"domainXML,omitempty"`
// vmi is VirtualMachineInstance is object of virtual machine currently processed by virt-launcher, it is encoded as JSON
Vmi []byte `protobuf:"bytes,2,opt,name=vmi,proto3" json:"vmi,omitempty"`
}
func (m *OnDefineDomainParams) Reset() { *m = OnDefineDomainParams{} }
func (m *OnDefineDomainParams) String() string { return proto.CompactTextString(m) }
func (*OnDefineDomainParams) ProtoMessage() {}
func (*OnDefineDomainParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *OnDefineDomainParams) GetDomainXML() []byte {
if m != nil {
return m.DomainXML
}
return nil
}
func (m *OnDefineDomainParams) GetVmi() []byte {
if m != nil {
return m.Vmi
}
return nil
}
type OnDefineDomainResult struct {
// domainXML is processed libvirt domain specification
DomainXML []byte `protobuf:"bytes,1,opt,name=domainXML,proto3" json:"domainXML,omitempty"`
}
func (m *OnDefineDomainResult) Reset() { *m = OnDefineDomainResult{} }
func (m *OnDefineDomainResult) String() string { return proto.CompactTextString(m) }
func (*OnDefineDomainResult) ProtoMessage() {}
func (*OnDefineDomainResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *OnDefineDomainResult) GetDomainXML() []byte {
if m != nil {
return m.DomainXML
}
return nil
}
func init() {
proto.RegisterType((*OnDefineDomainParams)(nil), "kubevirt.hooks.v1alpha1.OnDefineDomainParams")
proto.RegisterType((*OnDefineDomainResult)(nil), "kubevirt.hooks.v1alpha1.OnDefineDomainResult")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Callbacks service
type CallbacksClient interface {
OnDefineDomain(ctx context.Context, in *OnDefineDomainParams, opts ...grpc.CallOption) (*OnDefineDomainResult, error)
}
type callbacksClient struct {
cc *grpc.ClientConn
}
func NewCallbacksClient(cc *grpc.ClientConn) CallbacksClient {
return &callbacksClient{cc}
}
func (c *callbacksClient) OnDefineDomain(ctx context.Context, in *OnDefineDomainParams, opts ...grpc.CallOption) (*OnDefineDomainResult, error) {
out := new(OnDefineDomainResult)
err := grpc.Invoke(ctx, "/kubevirt.hooks.v1alpha1.Callbacks/OnDefineDomain", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Callbacks service
type CallbacksServer interface {
OnDefineDomain(context.Context, *OnDefineDomainParams) (*OnDefineDomainResult, error)
}
func RegisterCallbacksServer(s *grpc.Server, srv CallbacksServer) {
s.RegisterService(&_Callbacks_serviceDesc, srv)
}
func _Callbacks_OnDefineDomain_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(OnDefineDomainParams)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CallbacksServer).OnDefineDomain(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.hooks.v1alpha1.Callbacks/OnDefineDomain",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CallbacksServer).OnDefineDomain(ctx, req.(*OnDefineDomainParams))
}
return interceptor(ctx, in, info, handler)
}
var _Callbacks_serviceDesc = grpc.ServiceDesc{
ServiceName: "kubevirt.hooks.v1alpha1.Callbacks",
HandlerType: (*CallbacksServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "OnDefineDomain",
Handler: _Callbacks_OnDefineDomain_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "api_v1alpha1.proto",
}
func init() { proto.RegisterFile("api_v1alpha1.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 170 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4a, 0x2c, 0xc8, 0x8c,
0x2f, 0x33, 0x4c, 0xcc, 0x29, 0xc8, 0x48, 0x34, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12,
0xcf, 0x2e, 0x4d, 0x4a, 0x2d, 0xcb, 0x2c, 0x2a, 0xd1, 0xcb, 0xc8, 0xcf, 0xcf, 0x2e, 0xd6, 0x83,
0x49, 0x2b, 0xb9, 0x71, 0x89, 0xf8, 0xe7, 0xb9, 0xa4, 0xa6, 0x65, 0xe6, 0xa5, 0xba, 0xe4, 0xe7,
0x26, 0x66, 0xe6, 0x05, 0x24, 0x16, 0x25, 0xe6, 0x16, 0x0b, 0xc9, 0x70, 0x71, 0xa6, 0x80, 0xf9,
0x11, 0xbe, 0x3e, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x08, 0x01, 0x21, 0x01, 0x2e, 0xe6,
0xb2, 0xdc, 0x4c, 0x09, 0x26, 0xb0, 0x38, 0x88, 0xa9, 0x64, 0x82, 0x6e, 0x4e, 0x50, 0x6a, 0x71,
0x69, 0x4e, 0x09, 0x7e, 0x73, 0x8c, 0xaa, 0xb9, 0x38, 0x9d, 0x13, 0x73, 0x72, 0x92, 0x12, 0x93,
0xb3, 0x8b, 0x85, 0xf2, 0xb8, 0xf8, 0x50, 0x8d, 0x10, 0xd2, 0xd5, 0xc3, 0xe1, 0x6c, 0x3d, 0x6c,
0x6e, 0x96, 0x22, 0x56, 0x39, 0xc4, 0x69, 0x49, 0x6c, 0xe0, 0xa0, 0x31, 0x06, 0x04, 0x00, 0x00,
0xff, 0xff, 0x46, 0xa1, 0x62, 0x72, 0x30, 0x01, 0x00, 0x00,
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: api_v1alpha2.proto
/*
Package v1alpha2 is a generated protocol buffer package.
It is generated from these files:
api_v1alpha2.proto
It has these top-level messages:
OnDefineDomainParams
OnDefineDomainResult
PreCloudInitIsoParams
PreCloudInitIsoResult
*/
package v1alpha2
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type OnDefineDomainParams struct {
// domainXML is original libvirt domain specification
DomainXML []byte `protobuf:"bytes,1,opt,name=domainXML,proto3" json:"domainXML,omitempty"`
// vmi is VirtualMachineInstance is object of virtual machine currently processed by virt-launcher, it is encoded as JSON
Vmi []byte `protobuf:"bytes,2,opt,name=vmi,proto3" json:"vmi,omitempty"`
}
func (m *OnDefineDomainParams) Reset() { *m = OnDefineDomainParams{} }
func (m *OnDefineDomainParams) String() string { return proto.CompactTextString(m) }
func (*OnDefineDomainParams) ProtoMessage() {}
func (*OnDefineDomainParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *OnDefineDomainParams) GetDomainXML() []byte {
if m != nil {
return m.DomainXML
}
return nil
}
func (m *OnDefineDomainParams) GetVmi() []byte {
if m != nil {
return m.Vmi
}
return nil
}
type OnDefineDomainResult struct {
// domainXML is processed libvirt domain specification
DomainXML []byte `protobuf:"bytes,1,opt,name=domainXML,proto3" json:"domainXML,omitempty"`
}
func (m *OnDefineDomainResult) Reset() { *m = OnDefineDomainResult{} }
func (m *OnDefineDomainResult) String() string { return proto.CompactTextString(m) }
func (*OnDefineDomainResult) ProtoMessage() {}
func (*OnDefineDomainResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *OnDefineDomainResult) GetDomainXML() []byte {
if m != nil {
return m.DomainXML
}
return nil
}
type PreCloudInitIsoParams struct {
// cloudInitNoCloudSource is an object of CloudInitNoCloudSource encoded as JSON
// This is a legacy field to ensure backwards compatibility. New code should use cloudInitData instead.
CloudInitNoCloudSource []byte `protobuf:"bytes,1,opt,name=cloudInitNoCloudSource,proto3" json:"cloudInitNoCloudSource,omitempty"`
// vmi is VirtualMachineInstance is object of virtual machine currently processed by virt-launcher, it is encoded as JSON
Vmi []byte `protobuf:"bytes,2,opt,name=vmi,proto3" json:"vmi,omitempty"`
// cloudInitData is an object of CloudInitData encoded as JSON
CloudInitData []byte `protobuf:"bytes,3,opt,name=cloudInitData,proto3" json:"cloudInitData,omitempty"`
}
func (m *PreCloudInitIsoParams) Reset() { *m = PreCloudInitIsoParams{} }
func (m *PreCloudInitIsoParams) String() string { return proto.CompactTextString(m) }
func (*PreCloudInitIsoParams) ProtoMessage() {}
func (*PreCloudInitIsoParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *PreCloudInitIsoParams) GetCloudInitNoCloudSource() []byte {
if m != nil {
return m.CloudInitNoCloudSource
}
return nil
}
func (m *PreCloudInitIsoParams) GetVmi() []byte {
if m != nil {
return m.Vmi
}
return nil
}
func (m *PreCloudInitIsoParams) GetCloudInitData() []byte {
if m != nil {
return m.CloudInitData
}
return nil
}
type PreCloudInitIsoResult struct {
// cloudInitNoCloudSource is an object of CloudInitNoCloudSource encoded as JSON
// This is a legacy field to ensure backwards compatibility. New code should use cloudInitData instead.
CloudInitNoCloudSource []byte `protobuf:"bytes,1,opt,name=cloudInitNoCloudSource,proto3" json:"cloudInitNoCloudSource,omitempty"`
// cloudInitData is an object of CloudInitData encoded as JSON
CloudInitData []byte `protobuf:"bytes,3,opt,name=cloudInitData,proto3" json:"cloudInitData,omitempty"`
}
func (m *PreCloudInitIsoResult) Reset() { *m = PreCloudInitIsoResult{} }
func (m *PreCloudInitIsoResult) String() string { return proto.CompactTextString(m) }
func (*PreCloudInitIsoResult) ProtoMessage() {}
func (*PreCloudInitIsoResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *PreCloudInitIsoResult) GetCloudInitNoCloudSource() []byte {
if m != nil {
return m.CloudInitNoCloudSource
}
return nil
}
func (m *PreCloudInitIsoResult) GetCloudInitData() []byte {
if m != nil {
return m.CloudInitData
}
return nil
}
func init() {
proto.RegisterType((*OnDefineDomainParams)(nil), "kubevirt.hooks.v1alpha2.OnDefineDomainParams")
proto.RegisterType((*OnDefineDomainResult)(nil), "kubevirt.hooks.v1alpha2.OnDefineDomainResult")
proto.RegisterType((*PreCloudInitIsoParams)(nil), "kubevirt.hooks.v1alpha2.PreCloudInitIsoParams")
proto.RegisterType((*PreCloudInitIsoResult)(nil), "kubevirt.hooks.v1alpha2.PreCloudInitIsoResult")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Callbacks service
type CallbacksClient interface {
OnDefineDomain(ctx context.Context, in *OnDefineDomainParams, opts ...grpc.CallOption) (*OnDefineDomainResult, error)
PreCloudInitIso(ctx context.Context, in *PreCloudInitIsoParams, opts ...grpc.CallOption) (*PreCloudInitIsoResult, error)
}
type callbacksClient struct {
cc *grpc.ClientConn
}
func NewCallbacksClient(cc *grpc.ClientConn) CallbacksClient {
return &callbacksClient{cc}
}
func (c *callbacksClient) OnDefineDomain(ctx context.Context, in *OnDefineDomainParams, opts ...grpc.CallOption) (*OnDefineDomainResult, error) {
out := new(OnDefineDomainResult)
err := grpc.Invoke(ctx, "/kubevirt.hooks.v1alpha2.Callbacks/OnDefineDomain", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *callbacksClient) PreCloudInitIso(ctx context.Context, in *PreCloudInitIsoParams, opts ...grpc.CallOption) (*PreCloudInitIsoResult, error) {
out := new(PreCloudInitIsoResult)
err := grpc.Invoke(ctx, "/kubevirt.hooks.v1alpha2.Callbacks/PreCloudInitIso", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Callbacks service
type CallbacksServer interface {
OnDefineDomain(context.Context, *OnDefineDomainParams) (*OnDefineDomainResult, error)
PreCloudInitIso(context.Context, *PreCloudInitIsoParams) (*PreCloudInitIsoResult, error)
}
func RegisterCallbacksServer(s *grpc.Server, srv CallbacksServer) {
s.RegisterService(&_Callbacks_serviceDesc, srv)
}
func _Callbacks_OnDefineDomain_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(OnDefineDomainParams)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CallbacksServer).OnDefineDomain(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.hooks.v1alpha2.Callbacks/OnDefineDomain",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CallbacksServer).OnDefineDomain(ctx, req.(*OnDefineDomainParams))
}
return interceptor(ctx, in, info, handler)
}
func _Callbacks_PreCloudInitIso_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PreCloudInitIsoParams)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CallbacksServer).PreCloudInitIso(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.hooks.v1alpha2.Callbacks/PreCloudInitIso",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CallbacksServer).PreCloudInitIso(ctx, req.(*PreCloudInitIsoParams))
}
return interceptor(ctx, in, info, handler)
}
var _Callbacks_serviceDesc = grpc.ServiceDesc{
ServiceName: "kubevirt.hooks.v1alpha2.Callbacks",
HandlerType: (*CallbacksServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "OnDefineDomain",
Handler: _Callbacks_OnDefineDomain_Handler,
},
{
MethodName: "PreCloudInitIso",
Handler: _Callbacks_PreCloudInitIso_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "api_v1alpha2.proto",
}
func init() { proto.RegisterFile("api_v1alpha2.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 262 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4a, 0x2c, 0xc8, 0x8c,
0x2f, 0x33, 0x4c, 0xcc, 0x29, 0xc8, 0x48, 0x34, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12,
0xcf, 0x2e, 0x4d, 0x4a, 0x2d, 0xcb, 0x2c, 0x2a, 0xd1, 0xcb, 0xc8, 0xcf, 0xcf, 0x2e, 0xd6, 0x83,
0x49, 0x2b, 0xb9, 0x71, 0x89, 0xf8, 0xe7, 0xb9, 0xa4, 0xa6, 0x65, 0xe6, 0xa5, 0xba, 0xe4, 0xe7,
0x26, 0x66, 0xe6, 0x05, 0x24, 0x16, 0x25, 0xe6, 0x16, 0x0b, 0xc9, 0x70, 0x71, 0xa6, 0x80, 0xf9,
0x11, 0xbe, 0x3e, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x08, 0x01, 0x21, 0x01, 0x2e, 0xe6,
0xb2, 0xdc, 0x4c, 0x09, 0x26, 0xb0, 0x38, 0x88, 0xa9, 0x64, 0x82, 0x6e, 0x4e, 0x50, 0x6a, 0x71,
0x69, 0x4e, 0x09, 0x7e, 0x73, 0x94, 0xda, 0x19, 0xb9, 0x44, 0x03, 0x8a, 0x52, 0x9d, 0x73, 0xf2,
0x4b, 0x53, 0x3c, 0xf3, 0x32, 0x4b, 0x3c, 0x8b, 0xf3, 0xa1, 0xf6, 0x9b, 0x71, 0x89, 0x25, 0xc3,
0x44, 0xfd, 0xf2, 0xc1, 0x0a, 0x82, 0xf3, 0x4b, 0x8b, 0x92, 0x53, 0xa1, 0x86, 0xe0, 0x90, 0xc5,
0x74, 0x99, 0x90, 0x0a, 0x17, 0x2f, 0x5c, 0xad, 0x4b, 0x62, 0x49, 0xa2, 0x04, 0x33, 0x58, 0x0e,
0x55, 0x50, 0xa9, 0x14, 0xc3, 0x21, 0x50, 0x0f, 0x90, 0xeb, 0x10, 0xa2, 0xac, 0x35, 0x7a, 0xc7,
0xc8, 0xc5, 0xe9, 0x9c, 0x98, 0x93, 0x93, 0x94, 0x98, 0x9c, 0x5d, 0x2c, 0x94, 0xc7, 0xc5, 0x87,
0x1a, 0x88, 0x42, 0xba, 0x7a, 0x38, 0x22, 0x4e, 0x0f, 0x5b, 0xac, 0x49, 0x11, 0xab, 0x1c, 0xea,
0xb7, 0x42, 0x2e, 0x7e, 0x34, 0x4f, 0x0b, 0xe9, 0xe1, 0x34, 0x01, 0x6b, 0x3c, 0x49, 0x11, 0xad,
0x1e, 0x62, 0x65, 0x12, 0x1b, 0x38, 0x3d, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xec, 0xaa,
0xb2, 0x0b, 0xa5, 0x02, 0x00, 0x00,
}
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: api_v1alpha3.proto
/*
Package v1alpha3 is a generated protocol buffer package.
It is generated from these files:
api_v1alpha3.proto
It has these top-level messages:
OnDefineDomainParams
OnDefineDomainResult
PreCloudInitIsoParams
PreCloudInitIsoResult
ShutdownParams
ShutdownResult
*/
package v1alpha3
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type OnDefineDomainParams struct {
// domainXML is original libvirt domain specification
DomainXML []byte `protobuf:"bytes,1,opt,name=domainXML,proto3" json:"domainXML,omitempty"`
// vmi is VirtualMachineInstance is object of virtual machine currently processed by virt-launcher, it is encoded as JSON
Vmi []byte `protobuf:"bytes,2,opt,name=vmi,proto3" json:"vmi,omitempty"`
}
func (m *OnDefineDomainParams) Reset() { *m = OnDefineDomainParams{} }
func (m *OnDefineDomainParams) String() string { return proto.CompactTextString(m) }
func (*OnDefineDomainParams) ProtoMessage() {}
func (*OnDefineDomainParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *OnDefineDomainParams) GetDomainXML() []byte {
if m != nil {
return m.DomainXML
}
return nil
}
func (m *OnDefineDomainParams) GetVmi() []byte {
if m != nil {
return m.Vmi
}
return nil
}
type OnDefineDomainResult struct {
// domainXML is processed libvirt domain specification
DomainXML []byte `protobuf:"bytes,1,opt,name=domainXML,proto3" json:"domainXML,omitempty"`
}
func (m *OnDefineDomainResult) Reset() { *m = OnDefineDomainResult{} }
func (m *OnDefineDomainResult) String() string { return proto.CompactTextString(m) }
func (*OnDefineDomainResult) ProtoMessage() {}
func (*OnDefineDomainResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *OnDefineDomainResult) GetDomainXML() []byte {
if m != nil {
return m.DomainXML
}
return nil
}
type PreCloudInitIsoParams struct {
// cloudInitNoCloudSource is an object of CloudInitNoCloudSource encoded as JSON
// This is a legacy field to ensure backwards compatibility. New code should use cloudInitData instead.
CloudInitNoCloudSource []byte `protobuf:"bytes,1,opt,name=cloudInitNoCloudSource,proto3" json:"cloudInitNoCloudSource,omitempty"`
// vmi is VirtualMachineInstance is object of virtual machine currently processed by virt-launcher, it is encoded as JSON
Vmi []byte `protobuf:"bytes,2,opt,name=vmi,proto3" json:"vmi,omitempty"`
// cloudInitData is an object of CloudInitData encoded as JSON
CloudInitData []byte `protobuf:"bytes,3,opt,name=cloudInitData,proto3" json:"cloudInitData,omitempty"`
}
func (m *PreCloudInitIsoParams) Reset() { *m = PreCloudInitIsoParams{} }
func (m *PreCloudInitIsoParams) String() string { return proto.CompactTextString(m) }
func (*PreCloudInitIsoParams) ProtoMessage() {}
func (*PreCloudInitIsoParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *PreCloudInitIsoParams) GetCloudInitNoCloudSource() []byte {
if m != nil {
return m.CloudInitNoCloudSource
}
return nil
}
func (m *PreCloudInitIsoParams) GetVmi() []byte {
if m != nil {
return m.Vmi
}
return nil
}
func (m *PreCloudInitIsoParams) GetCloudInitData() []byte {
if m != nil {
return m.CloudInitData
}
return nil
}
type PreCloudInitIsoResult struct {
// cloudInitNoCloudSource is an object of CloudInitNoCloudSource encoded as JSON
// This is a legacy field to ensure backwards compatibility. New code should use cloudInitData instead.
CloudInitNoCloudSource []byte `protobuf:"bytes,1,opt,name=cloudInitNoCloudSource,proto3" json:"cloudInitNoCloudSource,omitempty"`
// cloudInitData is an object of CloudInitData encoded as JSON
CloudInitData []byte `protobuf:"bytes,3,opt,name=cloudInitData,proto3" json:"cloudInitData,omitempty"`
}
func (m *PreCloudInitIsoResult) Reset() { *m = PreCloudInitIsoResult{} }
func (m *PreCloudInitIsoResult) String() string { return proto.CompactTextString(m) }
func (*PreCloudInitIsoResult) ProtoMessage() {}
func (*PreCloudInitIsoResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *PreCloudInitIsoResult) GetCloudInitNoCloudSource() []byte {
if m != nil {
return m.CloudInitNoCloudSource
}
return nil
}
func (m *PreCloudInitIsoResult) GetCloudInitData() []byte {
if m != nil {
return m.CloudInitData
}
return nil
}
type ShutdownParams struct {
}
func (m *ShutdownParams) Reset() { *m = ShutdownParams{} }
func (m *ShutdownParams) String() string { return proto.CompactTextString(m) }
func (*ShutdownParams) ProtoMessage() {}
func (*ShutdownParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
type ShutdownResult struct {
}
func (m *ShutdownResult) Reset() { *m = ShutdownResult{} }
func (m *ShutdownResult) String() string { return proto.CompactTextString(m) }
func (*ShutdownResult) ProtoMessage() {}
func (*ShutdownResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func init() {
proto.RegisterType((*OnDefineDomainParams)(nil), "kubevirt.hooks.v1alpha3.OnDefineDomainParams")
proto.RegisterType((*OnDefineDomainResult)(nil), "kubevirt.hooks.v1alpha3.OnDefineDomainResult")
proto.RegisterType((*PreCloudInitIsoParams)(nil), "kubevirt.hooks.v1alpha3.PreCloudInitIsoParams")
proto.RegisterType((*PreCloudInitIsoResult)(nil), "kubevirt.hooks.v1alpha3.PreCloudInitIsoResult")
proto.RegisterType((*ShutdownParams)(nil), "kubevirt.hooks.v1alpha3.ShutdownParams")
proto.RegisterType((*ShutdownResult)(nil), "kubevirt.hooks.v1alpha3.ShutdownResult")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Callbacks service
type CallbacksClient interface {
OnDefineDomain(ctx context.Context, in *OnDefineDomainParams, opts ...grpc.CallOption) (*OnDefineDomainResult, error)
PreCloudInitIso(ctx context.Context, in *PreCloudInitIsoParams, opts ...grpc.CallOption) (*PreCloudInitIsoResult, error)
Shutdown(ctx context.Context, in *ShutdownParams, opts ...grpc.CallOption) (*ShutdownResult, error)
}
type callbacksClient struct {
cc *grpc.ClientConn
}
func NewCallbacksClient(cc *grpc.ClientConn) CallbacksClient {
return &callbacksClient{cc}
}
func (c *callbacksClient) OnDefineDomain(ctx context.Context, in *OnDefineDomainParams, opts ...grpc.CallOption) (*OnDefineDomainResult, error) {
out := new(OnDefineDomainResult)
err := grpc.Invoke(ctx, "/kubevirt.hooks.v1alpha3.Callbacks/OnDefineDomain", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *callbacksClient) PreCloudInitIso(ctx context.Context, in *PreCloudInitIsoParams, opts ...grpc.CallOption) (*PreCloudInitIsoResult, error) {
out := new(PreCloudInitIsoResult)
err := grpc.Invoke(ctx, "/kubevirt.hooks.v1alpha3.Callbacks/PreCloudInitIso", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *callbacksClient) Shutdown(ctx context.Context, in *ShutdownParams, opts ...grpc.CallOption) (*ShutdownResult, error) {
out := new(ShutdownResult)
err := grpc.Invoke(ctx, "/kubevirt.hooks.v1alpha3.Callbacks/Shutdown", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Callbacks service
type CallbacksServer interface {
OnDefineDomain(context.Context, *OnDefineDomainParams) (*OnDefineDomainResult, error)
PreCloudInitIso(context.Context, *PreCloudInitIsoParams) (*PreCloudInitIsoResult, error)
Shutdown(context.Context, *ShutdownParams) (*ShutdownResult, error)
}
func RegisterCallbacksServer(s *grpc.Server, srv CallbacksServer) {
s.RegisterService(&_Callbacks_serviceDesc, srv)
}
func _Callbacks_OnDefineDomain_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(OnDefineDomainParams)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CallbacksServer).OnDefineDomain(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.hooks.v1alpha3.Callbacks/OnDefineDomain",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CallbacksServer).OnDefineDomain(ctx, req.(*OnDefineDomainParams))
}
return interceptor(ctx, in, info, handler)
}
func _Callbacks_PreCloudInitIso_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PreCloudInitIsoParams)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CallbacksServer).PreCloudInitIso(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.hooks.v1alpha3.Callbacks/PreCloudInitIso",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CallbacksServer).PreCloudInitIso(ctx, req.(*PreCloudInitIsoParams))
}
return interceptor(ctx, in, info, handler)
}
func _Callbacks_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ShutdownParams)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(CallbacksServer).Shutdown(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kubevirt.hooks.v1alpha3.Callbacks/Shutdown",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(CallbacksServer).Shutdown(ctx, req.(*ShutdownParams))
}
return interceptor(ctx, in, info, handler)
}
var _Callbacks_serviceDesc = grpc.ServiceDesc{
ServiceName: "kubevirt.hooks.v1alpha3.Callbacks",
HandlerType: (*CallbacksServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "OnDefineDomain",
Handler: _Callbacks_OnDefineDomain_Handler,
},
{
MethodName: "PreCloudInitIso",
Handler: _Callbacks_PreCloudInitIso_Handler,
},
{
MethodName: "Shutdown",
Handler: _Callbacks_Shutdown_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "api_v1alpha3.proto",
}
func init() { proto.RegisterFile("api_v1alpha3.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 296 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4a, 0x2c, 0xc8, 0x8c,
0x2f, 0x33, 0x4c, 0xcc, 0x29, 0xc8, 0x48, 0x34, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12,
0xcf, 0x2e, 0x4d, 0x4a, 0x2d, 0xcb, 0x2c, 0x2a, 0xd1, 0xcb, 0xc8, 0xcf, 0xcf, 0x2e, 0xd6, 0x83,
0x49, 0x2b, 0xb9, 0x71, 0x89, 0xf8, 0xe7, 0xb9, 0xa4, 0xa6, 0x65, 0xe6, 0xa5, 0xba, 0xe4, 0xe7,
0x26, 0x66, 0xe6, 0x05, 0x24, 0x16, 0x25, 0xe6, 0x16, 0x0b, 0xc9, 0x70, 0x71, 0xa6, 0x80, 0xf9,
0x11, 0xbe, 0x3e, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x08, 0x01, 0x21, 0x01, 0x2e, 0xe6,
0xb2, 0xdc, 0x4c, 0x09, 0x26, 0xb0, 0x38, 0x88, 0xa9, 0x64, 0x82, 0x6e, 0x4e, 0x50, 0x6a, 0x71,
0x69, 0x4e, 0x09, 0x7e, 0x73, 0x94, 0xda, 0x19, 0xb9, 0x44, 0x03, 0x8a, 0x52, 0x9d, 0x73, 0xf2,
0x4b, 0x53, 0x3c, 0xf3, 0x32, 0x4b, 0x3c, 0x8b, 0xf3, 0xa1, 0xf6, 0x9b, 0x71, 0x89, 0x25, 0xc3,
0x44, 0xfd, 0xf2, 0xc1, 0x0a, 0x82, 0xf3, 0x4b, 0x8b, 0x92, 0x53, 0xa1, 0x86, 0xe0, 0x90, 0xc5,
0x74, 0x99, 0x90, 0x0a, 0x17, 0x2f, 0x5c, 0xad, 0x4b, 0x62, 0x49, 0xa2, 0x04, 0x33, 0x58, 0x0e,
0x55, 0x50, 0xa9, 0x14, 0xc3, 0x21, 0x50, 0x0f, 0x90, 0xeb, 0x10, 0xe2, 0xac, 0x15, 0xe0, 0xe2,
0x0b, 0xce, 0x28, 0x2d, 0x49, 0xc9, 0x2f, 0x87, 0x06, 0x3c, 0xb2, 0x08, 0xc4, 0x05, 0x46, 0x67,
0x98, 0xb8, 0x38, 0x9d, 0x13, 0x73, 0x72, 0x92, 0x12, 0x93, 0xb3, 0x8b, 0x85, 0xf2, 0xb8, 0xf8,
0x50, 0x03, 0x5a, 0x48, 0x57, 0x0f, 0x47, 0xe4, 0xea, 0x61, 0x8b, 0x59, 0x29, 0x62, 0x95, 0x43,
0xfd, 0x5f, 0xc8, 0xc5, 0x8f, 0x16, 0x30, 0x42, 0x7a, 0x38, 0x4d, 0xc0, 0x1a, 0x97, 0x52, 0x44,
0xab, 0x87, 0x5a, 0x19, 0xc3, 0xc5, 0x01, 0x0b, 0x02, 0x21, 0x75, 0x9c, 0x7a, 0x51, 0xc3, 0x4d,
0x8a, 0xb0, 0x42, 0x88, 0xe9, 0x49, 0x6c, 0xe0, 0x1c, 0x61, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff,
0x99, 0x7e, 0x92, 0xc5, 0x27, 0x03, 0x00, 0x00,
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package hostdisk
import (
"errors"
"fmt"
"os"
"path"
"path/filepath"
"syscall"
"golang.org/x/sys/unix"
"kubevirt.io/client-go/log"
ephemeraldiskutils "kubevirt.io/kubevirt/pkg/ephemeral-disk-utils"
"kubevirt.io/kubevirt/pkg/safepath"
"kubevirt.io/kubevirt/pkg/unsafepath"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/storage/types"
"kubevirt.io/kubevirt/pkg/util"
)
var pvcBaseDir = "/var/run/kubevirt-private/vmi-disks"
const (
EventReasonToleratedSmallPV = "ToleratedSmallPV"
EventTypeToleratedSmallPV = k8sv1.EventTypeNormal
)
func ReplacePVCByHostDisk(vmi *v1.VirtualMachineInstance) error {
// If a PVC is defined and it is not a BlockMode PVC, it is replaced by a HostDisk.
// A Filesystem PersistentVolumeClaim is mounted into the pod as a directory from the node filesystem.
passthoughFSVolumes := make(map[string]struct{})
for i := range vmi.Spec.Domain.Devices.Filesystems {
passthoughFSVolumes[vmi.Spec.Domain.Devices.Filesystems[i].Name] = struct{}{}
}
pvcVolume := make(map[string]v1.VolumeStatus)
hotplugVolumes := make(map[string]bool)
for _, volumeStatus := range vmi.Status.VolumeStatus {
if volumeStatus.HotplugVolume != nil {
hotplugVolumes[volumeStatus.Name] = true
}
if volumeStatus.PersistentVolumeClaimInfo != nil {
pvcVolume[volumeStatus.Name] = volumeStatus
}
}
for i := range vmi.Spec.Volumes {
volume := vmi.Spec.Volumes[i]
volumeSource := &vmi.Spec.Volumes[i].VolumeSource
if volumeSource.PersistentVolumeClaim != nil {
if shouldSkipVolumeSource(passthoughFSVolumes, hotplugVolumes, pvcVolume, volume.Name) {
continue
}
err := replaceForHostDisk(volumeSource, volume.Name, pvcVolume)
if err != nil {
return err
}
// The PersistentVolumeClaim is replaced by a HostDisk.
volumeSource.PersistentVolumeClaim = nil
}
if volumeSource.DataVolume != nil && volumeSource.DataVolume.Name != "" {
if shouldSkipVolumeSource(passthoughFSVolumes, hotplugVolumes, pvcVolume, volume.Name) {
continue
}
err := replaceForHostDisk(volumeSource, volume.Name, pvcVolume)
if err != nil {
return err
}
// The PersistentVolumeClaim is replaced by a HostDisk.
volumeSource.DataVolume = nil
}
}
return nil
}
func replaceForHostDisk(volumeSource *v1.VolumeSource, volumeName string, pvcVolume map[string]v1.VolumeStatus) error {
volumeStatus := pvcVolume[volumeName]
isShared := types.HasSharedAccessMode(volumeStatus.PersistentVolumeClaimInfo.AccessModes)
file := getPVCDiskImgPath(volumeName, "disk.img")
capacity, capacityOk := volumeStatus.PersistentVolumeClaimInfo.Capacity[k8sv1.ResourceStorage]
requested, requestedOk := volumeStatus.PersistentVolumeClaimInfo.Requests[k8sv1.ResourceStorage]
if !capacityOk && !requestedOk {
return fmt.Errorf("unable to determine capacity of HostDisk from PVC that provides no storage capacity or requests")
}
var size int64
// Use the requested size if it is smaller than the overall capacity of the PVC to ensure the created disks are the size requested by the user
if requestedOk && ((capacityOk && capacity.Value() > requested.Value()) || !capacityOk) {
// The host-disk must be 1MiB-aligned. If the volume specifies a misaligned size, shrink it down to the nearest multiple of 1MiB
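// For example, a misaligned request of 1,572,864 B (1.5 MiB) is rounded down to
// 1,048,576 B (1 MiB); a request below 1 MiB aligns to 0 and is rejected below.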
size = util.AlignImageSizeTo1MiB(requested.Value(), log.Log)
} else {
size = util.AlignImageSizeTo1MiB(capacity.Value(), log.Log)
}
if size == 0 {
return fmt.Errorf("the size for volume %s is too low, must be at least 1MiB", volumeName)
}
capacity.Set(size)
volumeSource.HostDisk = &v1.HostDisk{
Path: file,
Type: v1.HostDiskExistsOrCreate,
Capacity: capacity,
Shared: &isShared,
}
return nil
}
func shouldSkipVolumeSource(passthoughFSVolumes map[string]struct{}, hotplugVolumes map[string]bool, pvcVolume map[string]v1.VolumeStatus, volumeName string) bool {
// If a PVC is used in a Filesystem (passthrough), it should not be mapped as a HostDisk
// and an image file should not be created.
if _, isPassthoughFSVolume := passthoughFSVolumes[volumeName]; isPassthoughFSVolume {
log.Log.V(4).Infof("this volume %s is mapped as a filesystem passthrough, will not be replaced by HostDisk", volumeName)
return true
}
if hotplugVolumes[volumeName] {
log.Log.V(4).Infof("this volume %s is hotplugged, will not be replaced by HostDisk", volumeName)
return true
}
volumeStatus, ok := pvcVolume[volumeName]
if !ok || types.IsPVCBlock(volumeStatus.PersistentVolumeClaimInfo.VolumeMode) {
log.Log.V(4).Infof("this volume %s is block, will not be replaced by HostDisk", volumeName)
// This is not a disk on a file system, so skip it.
return true
}
return false
}
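// dirBytesAvailable returns the bytes available to unprivileged users on the
// filesystem containing path, minus the given reserve.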
func dirBytesAvailable(path string, reserve uint64) (uint64, error) {
var stat syscall.Statfs_t
err := syscall.Statfs(path, &stat)
if err != nil {
return 0, err
}
available := stat.Bavail * uint64(stat.Bsize)
// Guard against unsigned underflow when the reserve exceeds the available space.
if reserve > available {
return 0, nil
}
return available - reserve, nil
}
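// createSparseRaw creates a sparse raw disk image of the given size by writing a
// single zero byte at offset size-1: the file reports the full size while the
// filesystem allocates blocks only as data is actually written.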
func createSparseRaw(diskdir *safepath.Path, diskName string, size int64) (err error) {
offset := size - 1
if filepath.Base(diskName) != diskName {
return fmt.Errorf("Disk name needs to be base")
}
err = safepath.TouchAtNoFollow(diskdir, filepath.Base(diskName), 0666)
if err != nil {
return fmt.Errorf("Failed touch %s,%s : %v", diskdir, diskName, err)
}
diskPath, err := safepath.JoinNoFollow(diskdir, diskName)
if err != nil {
return fmt.Errorf("Failed append %s,%s : %v", diskdir, diskName, err)
}
sFile, err := safepath.OpenAtNoFollow(diskPath)
if err != nil {
return fmt.Errorf("Failed NewFile %s,%s : %v", diskdir, diskName, err)
}
defer util.CloseIOAndCheckErr(sFile, &err)
f, err := os.OpenFile(sFile.SafePath(), os.O_WRONLY, 0666)
if err != nil {
return err
}
defer util.CloseIOAndCheckErr(f, &err)
_, err = f.WriteAt([]byte{0}, offset)
if err != nil {
return err
}
return nil
}
func getPVCDiskImgPath(volumeName string, diskName string) string {
return path.Join(pvcBaseDir, volumeName, diskName)
}
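// For example, with the default pvcBaseDir, GetMountedHostDiskPath("myvolume",
// "/some/dir/disk.img") yields "/var/run/kubevirt-private/vmi-disks/myvolume/disk.img".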
func GetMountedHostDiskPath(volumeName string, path string) string {
return getPVCDiskImgPath(volumeName, filepath.Base(path))
}
func GetMountedHostDiskDir(volumeName string) string {
return getPVCDiskImgPath(volumeName, "")
}
type DiskImgCreator struct {
dirBytesAvailableFunc func(path string, reserve uint64) (uint64, error)
recorder record.EventRecorder
lessPVCSpaceToleration int
minimumPVCReserveBytes uint64
mountRoot *safepath.Path
}
func NewHostDiskCreator(recorder record.EventRecorder, lessPVCSpaceToleration int, minimumPVCReserveBytes uint64, mountRoot *safepath.Path) DiskImgCreator {
return DiskImgCreator{
dirBytesAvailableFunc: dirBytesAvailable,
recorder: recorder,
lessPVCSpaceToleration: lessPVCSpaceToleration,
minimumPVCReserveBytes: minimumPVCReserveBytes,
mountRoot: mountRoot,
}
}
func (hdc *DiskImgCreator) setlessPVCSpaceToleration(toleration int) {
hdc.lessPVCSpaceToleration = toleration
}
func (hdc *DiskImgCreator) Create(vmi *v1.VirtualMachineInstance) error {
for _, volume := range vmi.Spec.Volumes {
if hostDisk := volume.VolumeSource.HostDisk; shouldMountHostDisk(hostDisk) {
if err := hdc.mountHostDiskAndSetOwnership(vmi, volume.Name, hostDisk); err != nil {
return err
}
}
}
return nil
}
func shouldMountHostDisk(hostDisk *v1.HostDisk) bool {
return hostDisk != nil && hostDisk.Type == v1.HostDiskExistsOrCreate && hostDisk.Path != ""
}
func (hdc *DiskImgCreator) mountHostDiskAndSetOwnership(vmi *v1.VirtualMachineInstance, volumeName string, hostDisk *v1.HostDisk) error {
diskDir, err := hdc.mountRoot.AppendAndResolveWithRelativeRoot(GetMountedHostDiskDir(volumeName))
if err != nil {
return fmt.Errorf("failed to resolve disk directory for volume %s: %v", volumeName, err)
}
diskPath, err := safepath.JoinNoFollow(diskDir, filepath.Base(hostDisk.Path))
fileNotExists := errors.Is(err, unix.ENOENT)
if err != nil && !fileNotExists {
return fmt.Errorf("failed to resolve disk path %s: %v", hostDisk.Path, err)
}
if fileNotExists {
if err := hdc.handleRequestedSizeAndCreateSparseRaw(vmi, diskDir, filepath.Base(hostDisk.Path), hostDisk); err != nil {
return err
}
// Resolve the disk path again now that the image file exists.
diskPath, err = safepath.JoinNoFollow(diskDir, filepath.Base(hostDisk.Path))
if err != nil {
return fmt.Errorf("failed to resolve disk path after creating %s: %v", hostDisk.Path, err)
}
}
// Change file ownership to the qemu user, also for pre-existing images.
if err := ephemeraldiskutils.DefaultOwnershipManager.SetFileOwnership(diskPath); err != nil {
log.Log.Reason(err).Errorf("Couldn't set ownership on %s: %v", diskPath, err)
return err
}
return nil
}
func (hdc *DiskImgCreator) handleRequestedSizeAndCreateSparseRaw(vmi *v1.VirtualMachineInstance, diskDir *safepath.Path, diskName string, hostDisk *v1.HostDisk) error {
size, err := hdc.dirBytesAvailableFunc(unsafepath.UnsafeAbsolute(diskDir.Raw()), hdc.minimumPVCReserveBytes)
if err != nil {
return err
}
availableSize := int64(size)
requestedSize, _ := hostDisk.Capacity.AsInt64()
if requestedSize > availableSize {
requestedSize, err = hdc.shrinkRequestedSize(vmi, requestedSize, availableSize, hostDisk)
if err != nil {
return err
}
}
err = createSparseRaw(diskDir, diskName, requestedSize)
if err != nil {
fullPath := filepath.Join(unsafepath.UnsafeAbsolute(diskDir.Raw()), diskName)
log.Log.Reason(err).Errorf("Couldn't create a sparse raw file for disk path: %s, error: %v", fullPath, err)
return err
}
return nil
}
func (hdc *DiskImgCreator) shrinkRequestedSize(vmi *v1.VirtualMachineInstance, requestedSize int64, availableSize int64, hostDisk *v1.HostDisk) (int64, error) {
// Some storage provisioners provide less space than requested, due to filesystem overhead etc.
// We tolerate some difference in requested and available capacity up to some degree.
// This can be configured with the "pvc-tolerate-less-space-up-to-percent" parameter in the kubevirt-config ConfigMap.
// It is provided as argument to virt-launcher.
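// For example, with a 10GiB request and a toleration of 10%, any PVC
// providing at least 9GiB is accepted and the image is shrunk to fit.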
toleratedSize := requestedSize * (100 - int64(hdc.lessPVCSpaceToleration)) / 100
if toleratedSize > availableSize {
return 0, fmt.Errorf("unable to create %s, not enough space, demanded size %d B is bigger than available space %d B, also after taking %v %% toleration into account",
hostDisk.Path, uint64(requestedSize), availableSize, hdc.lessPVCSpaceToleration)
}
msg := fmt.Sprintf("PV size too small: expected %v B, found %v B. Using it anyway, it is within %v %% toleration", requestedSize, availableSize, hdc.lessPVCSpaceToleration)
log.Log.Info(msg)
hdc.recorder.Event(vmi, EventTypeToleratedSmallPV, EventReasonToleratedSmallPV, msg)
return availableSize, nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package hotplugdisk
import (
"errors"
"fmt"
"os"
"path/filepath"
"k8s.io/apimachinery/pkg/types"
"kubevirt.io/kubevirt/pkg/safepath"
"kubevirt.io/kubevirt/pkg/util"
)
var mountBaseDir = filepath.Join(util.VirtShareDir, "/hotplug-disks")
const (
hotplugDisksKubeletVolumePath = "volumes/kubernetes.io~empty-dir/hotplug-disks"
)
var (
// visible for testing
TargetPodBasePath = func(podBaseDir string, podUID types.UID) string {
return filepath.Join(podBaseDir, string(podUID), hotplugDisksKubeletVolumePath)
}
)
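// For example, TargetPodBasePath("/var/lib/kubelet/pods", "1234-abcd") yields
// "/var/lib/kubelet/pods/1234-abcd/volumes/kubernetes.io~empty-dir/hotplug-disks".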
type HotplugDiskManagerInterface interface {
GetHotplugTargetPodPathOnHost(virtlauncherPodUID types.UID) (*safepath.Path, error)
GetFileSystemDiskTargetPathFromHostView(virtlauncherPodUID types.UID, volumeName string, create bool) (*safepath.Path, error)
GetFileSystemDirectoryTargetPathFromHostView(virtlauncherPodUID types.UID, volumeName string, create bool) (*safepath.Path, error)
}
func NewHotplugDiskManager(kubeletPodsDir string) *hotplugDiskManager {
return &hotplugDiskManager{
podsBaseDir: filepath.Join(util.HostRootMount, kubeletPodsDir),
}
}
func NewHotplugDiskWithOptions(podsBaseDir string) *hotplugDiskManager {
return &hotplugDiskManager{
podsBaseDir: podsBaseDir,
}
}
type hotplugDiskManager struct {
podsBaseDir string
}
// GetHotplugTargetPodPathOnHost retrieves the target pod (virt-launcher) path on the host.
func (h *hotplugDiskManager) GetHotplugTargetPodPathOnHost(virtlauncherPodUID types.UID) (*safepath.Path, error) {
podpath := TargetPodBasePath(h.podsBaseDir, virtlauncherPodUID)
return safepath.JoinAndResolveWithRelativeRoot("/", podpath)
}
// GetFileSystemDirectoryTargetPathFromHostView gets the directory path in the target pod (virt-launcher) on the host.
func (h *hotplugDiskManager) GetFileSystemDirectoryTargetPathFromHostView(virtlauncherPodUID types.UID, volumeName string, create bool) (*safepath.Path, error) {
targetPath, err := h.GetHotplugTargetPodPathOnHost(virtlauncherPodUID)
if err != nil {
return nil, err
}
_, err = safepath.JoinNoFollow(targetPath, volumeName)
if errors.Is(err, os.ErrNotExist) && create {
if err := safepath.MkdirAtNoFollow(targetPath, volumeName, 0750); err != nil {
return nil, err
}
} else if err != nil {
return nil, err
}
return safepath.JoinNoFollow(targetPath, volumeName)
}
// GetFileSystemDiskTargetPathFromHostView gets the disk image file in the target pod (virt-launcher) on the host.
func (h *hotplugDiskManager) GetFileSystemDiskTargetPathFromHostView(virtlauncherPodUID types.UID, volumeName string, create bool) (*safepath.Path, error) {
targetPath, err := h.GetHotplugTargetPodPathOnHost(virtlauncherPodUID)
if err != nil {
return nil, err
}
diskName := fmt.Sprintf("%s.img", volumeName)
if err := safepath.TouchAtNoFollow(targetPath, diskName, 0666); err != nil && !os.IsExist(err) {
return nil, err
}
return safepath.JoinNoFollow(targetPath, diskName)
}
// SetLocalDirectory creates the base directory where disk images are mounted when hotplugged. Filesystem volumes
// live in a subdirectory named after the volume; block volumes appear directly in this directory as block devices.
func SetLocalDirectory(dir string) error {
mountBaseDir = dir
return os.MkdirAll(dir, 0755)
}
func GetVolumeMountDir(volumeName string) string {
return filepath.Join(mountBaseDir, volumeName)
}
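// As a sketch, assuming util.VirtShareDir resolves to /var/run/kubevirt and
// SetLocalDirectory has not overridden the default,
// GetVolumeMountDir("myvolume") returns "/var/run/kubevirt/hotplug-disks/myvolume".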
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package annotations
import (
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
virtv1 "kubevirt.io/api/core/v1"
api "kubevirt.io/api/instancetype"
)
func Set(vm *virtv1.VirtualMachine, target metav1.Object) {
if vm.Spec.Instancetype == nil {
return
}
if target.GetAnnotations() == nil {
target.SetAnnotations(make(map[string]string))
}
switch strings.ToLower(vm.Spec.Instancetype.Kind) {
case api.PluralResourceName, api.SingularResourceName:
target.GetAnnotations()[virtv1.InstancetypeAnnotation] = vm.Spec.Instancetype.Name
case "", api.ClusterPluralResourceName, api.ClusterSingularResourceName:
target.GetAnnotations()[virtv1.ClusterInstancetypeAnnotation] = vm.Spec.Instancetype.Name
}
}
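// For example, from a caller's perspective, a VM referencing a namespaced instance type:
//
//	vm.Spec.Instancetype = &virtv1.InstancetypeMatcher{Name: "small", Kind: "VirtualMachineInstancetype"}
//	annotations.Set(vm, &vmi.ObjectMeta) // sets virtv1.InstancetypeAnnotation to "small"
//
// An empty or cluster-scoped Kind sets virtv1.ClusterInstancetypeAnnotation instead.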
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
)
func applyInstanceTypeAnnotations(annotations map[string]string, target metav1.Object) (conflicts conflict.Conflicts) {
if target.GetAnnotations() == nil {
target.SetAnnotations(make(map[string]string))
}
targetAnnotations := target.GetAnnotations()
for key, value := range annotations {
if targetValue, exists := targetAnnotations[key]; exists {
if targetValue != value {
conflicts = append(conflicts, conflict.New("annotations", key))
}
continue
}
targetAnnotations[key] = value
}
return conflicts
}
//nolint:gocyclo
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
k8sv1 "k8s.io/api/core/v1"
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
preferenceApply "kubevirt.io/kubevirt/pkg/instancetype/preference/apply"
)
func applyCPU(
baseConflict *conflict.Conflict,
instancetypeSpec *v1beta1.VirtualMachineInstancetypeSpec,
preferenceSpec *v1beta1.VirtualMachinePreferenceSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
) conflict.Conflicts {
if vmiSpec.Domain.CPU == nil {
vmiSpec.Domain.CPU = &virtv1.CPU{}
}
// If we have any conflicts, return early as there's no need to apply the topology below
if conflicts := validateCPU(baseConflict, instancetypeSpec, vmiSpec); len(conflicts) > 0 {
return conflicts
}
if instancetypeSpec.CPU.Model != nil {
vmiSpec.Domain.CPU.Model = *instancetypeSpec.CPU.Model
}
if instancetypeSpec.CPU.DedicatedCPUPlacement != nil {
vmiSpec.Domain.CPU.DedicatedCPUPlacement = *instancetypeSpec.CPU.DedicatedCPUPlacement
}
if instancetypeSpec.CPU.IsolateEmulatorThread != nil {
vmiSpec.Domain.CPU.IsolateEmulatorThread = *instancetypeSpec.CPU.IsolateEmulatorThread
}
if instancetypeSpec.CPU.NUMA != nil {
vmiSpec.Domain.CPU.NUMA = instancetypeSpec.CPU.NUMA.DeepCopy()
}
if instancetypeSpec.CPU.Realtime != nil {
vmiSpec.Domain.CPU.Realtime = instancetypeSpec.CPU.Realtime.DeepCopy()
}
if instancetypeSpec.CPU.MaxSockets != nil {
vmiSpec.Domain.CPU.MaxSockets = *instancetypeSpec.CPU.MaxSockets
}
applyGuestCPUTopology(instancetypeSpec.CPU.Guest, preferenceSpec, vmiSpec)
return nil
}
func applyGuestCPUTopology(vCPUs uint32, preferenceSpec *v1beta1.VirtualMachinePreferenceSpec, vmiSpec *virtv1.VirtualMachineInstanceSpec) {
// Apply the default topology here to avoid duplication below
vmiSpec.Domain.CPU.Cores = 1
vmiSpec.Domain.CPU.Sockets = 1
vmiSpec.Domain.CPU.Threads = 1
if vCPUs == 1 {
return
}
switch preferenceApply.GetPreferredTopology(preferenceSpec) {
case v1beta1.DeprecatedPreferCores, v1beta1.Cores:
vmiSpec.Domain.CPU.Cores = vCPUs
case v1beta1.DeprecatedPreferSockets, v1beta1.DeprecatedPreferAny, v1beta1.Sockets, v1beta1.Any:
vmiSpec.Domain.CPU.Sockets = vCPUs
case v1beta1.DeprecatedPreferThreads, v1beta1.Threads:
vmiSpec.Domain.CPU.Threads = vCPUs
case v1beta1.DeprecatedPreferSpread, v1beta1.Spread:
ratio, across := preferenceApply.GetSpreadOptions(preferenceSpec)
switch across {
case v1beta1.SpreadAcrossSocketsCores:
vmiSpec.Domain.CPU.Cores = ratio
vmiSpec.Domain.CPU.Sockets = vCPUs / ratio
case v1beta1.SpreadAcrossCoresThreads:
vmiSpec.Domain.CPU.Threads = ratio
vmiSpec.Domain.CPU.Cores = vCPUs / ratio
case v1beta1.SpreadAcrossSocketsCoresThreads:
const threadsPerCore = 2
vmiSpec.Domain.CPU.Threads = threadsPerCore
vmiSpec.Domain.CPU.Cores = ratio
vmiSpec.Domain.CPU.Sockets = vCPUs / threadsPerCore / ratio
}
}
}
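// As a worked example, spreading 8 vCPUs with a ratio of 2 across sockets and
// cores yields sockets=4, cores=2, threads=1; spreading across sockets, cores
// and threads with the same ratio yields sockets=2, cores=2, threads=2.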
func validateCPU(
baseConflict *conflict.Conflict,
instancetypeSpec *v1beta1.VirtualMachineInstancetypeSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
) (conflicts conflict.Conflicts) {
if _, hasCPURequests := vmiSpec.Domain.Resources.Requests[k8sv1.ResourceCPU]; hasCPURequests {
conflicts = append(conflicts, baseConflict.NewChild("domain", "resources", "requests", string(k8sv1.ResourceCPU)))
}
if _, hasCPULimits := vmiSpec.Domain.Resources.Limits[k8sv1.ResourceCPU]; hasCPULimits {
conflicts = append(conflicts, baseConflict.NewChild("domain", "resources", "limits", string(k8sv1.ResourceCPU)))
}
if vmiSpec.Domain.CPU.Sockets != 0 {
conflicts = append(conflicts, baseConflict.NewChild("domain", "cpu", "sockets"))
}
if vmiSpec.Domain.CPU.Cores != 0 {
conflicts = append(conflicts, baseConflict.NewChild("domain", "cpu", "cores"))
}
if vmiSpec.Domain.CPU.Threads != 0 {
conflicts = append(conflicts, baseConflict.NewChild("domain", "cpu", "threads"))
}
if vmiSpec.Domain.CPU.Model != "" && instancetypeSpec.CPU.Model != nil {
conflicts = append(conflicts, baseConflict.NewChild("domain", "cpu", "model"))
}
if vmiSpec.Domain.CPU.DedicatedCPUPlacement && instancetypeSpec.CPU.DedicatedCPUPlacement != nil {
conflicts = append(conflicts, baseConflict.NewChild("domain", "cpu", "dedicatedCPUPlacement"))
}
if vmiSpec.Domain.CPU.IsolateEmulatorThread && instancetypeSpec.CPU.IsolateEmulatorThread != nil {
conflicts = append(conflicts, baseConflict.NewChild("domain", "cpu", "isolateEmulatorThread"))
}
if vmiSpec.Domain.CPU.NUMA != nil && instancetypeSpec.CPU.NUMA != nil {
conflicts = append(conflicts, baseConflict.NewChild("domain", "cpu", "numa"))
}
if vmiSpec.Domain.CPU.Realtime != nil && instancetypeSpec.CPU.Realtime != nil {
conflicts = append(conflicts, baseConflict.NewChild("domain", "cpu", "realtime"))
}
return conflicts
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
)
func applyGPUs(
baseConflict *conflict.Conflict,
instancetypeSpec *v1beta1.VirtualMachineInstancetypeSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
) conflict.Conflicts {
if len(instancetypeSpec.GPUs) == 0 {
return nil
}
if len(vmiSpec.Domain.Devices.GPUs) > 0 {
return conflict.Conflicts{baseConflict.NewChild("domain", "devices", "gpus")}
}
vmiSpec.Domain.Devices.GPUs = make([]virtv1.GPU, len(instancetypeSpec.GPUs))
copy(vmiSpec.Domain.Devices.GPUs, instancetypeSpec.GPUs)
return nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
)
func applyHostDevices(
baseConflict *conflict.Conflict,
instancetypeSpec *v1beta1.VirtualMachineInstancetypeSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
) conflict.Conflicts {
if len(instancetypeSpec.HostDevices) == 0 {
return nil
}
if len(vmiSpec.Domain.Devices.HostDevices) > 0 {
return conflict.Conflicts{baseConflict.NewChild("domain", "devices", "hostDevices")}
}
vmiSpec.Domain.Devices.HostDevices = make([]virtv1.HostDevice, len(instancetypeSpec.HostDevices))
copy(vmiSpec.Domain.Devices.HostDevices, instancetypeSpec.HostDevices)
return nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
)
func applyIOThreadPolicy(
baseConflict *conflict.Conflict,
instancetypeSpec *v1beta1.VirtualMachineInstancetypeSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
) conflict.Conflicts {
if instancetypeSpec.IOThreadsPolicy == nil {
return nil
}
if vmiSpec.Domain.IOThreadsPolicy != nil {
return conflict.Conflicts{baseConflict.NewChild("domain", "ioThreadsPolicy")}
}
instancetypeIOThreadPolicy := *instancetypeSpec.IOThreadsPolicy
vmiSpec.Domain.IOThreadsPolicy = &instancetypeIOThreadPolicy
return nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
"kubevirt.io/kubevirt/pkg/pointer"
)
func applyIOThreads(
baseConflict *conflict.Conflict,
instancetypeSpec *v1beta1.VirtualMachineInstancetypeSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
) conflict.Conflicts {
if instancetypeSpec.IOThreads == nil || instancetypeSpec.IOThreads.SupplementalPoolThreadCount == nil {
return nil
}
if vmiSpec.Domain.IOThreads != nil && vmiSpec.Domain.IOThreads.SupplementalPoolThreadCount != nil {
return conflict.Conflicts{baseConflict.NewChild("domain", "ioThreads", "supplementalPoolThreadCount")}
}
if vmiSpec.Domain.IOThreads == nil {
vmiSpec.Domain.IOThreads = &virtv1.DiskIOThreads{}
}
vmiSpec.Domain.IOThreads.SupplementalPoolThreadCount = pointer.P(*instancetypeSpec.IOThreads.SupplementalPoolThreadCount)
return nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
)
func applyLaunchSecurity(
baseConflict *conflict.Conflict,
instancetypeSpec *v1beta1.VirtualMachineInstancetypeSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
) conflict.Conflicts {
if instancetypeSpec.LaunchSecurity == nil {
return nil
}
if vmiSpec.Domain.LaunchSecurity != nil {
return conflict.Conflicts{baseConflict.NewChild("domain", "launchSecurity")}
}
vmiSpec.Domain.LaunchSecurity = instancetypeSpec.LaunchSecurity.DeepCopy()
return nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
)
func applyMemory(
baseConflict *conflict.Conflict,
instancetypeSpec *v1beta1.VirtualMachineInstancetypeSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
) conflict.Conflicts {
if vmiSpec.Domain.Memory == nil {
vmiSpec.Domain.Memory = &virtv1.Memory{}
}
// If we have any conflicts, return early as there's no need to apply the memory settings below
if conflicts := validateMemory(baseConflict, instancetypeSpec, vmiSpec); len(conflicts) > 0 {
return conflicts
}
instancetypeMemory := instancetypeSpec.Memory.Guest.DeepCopy()
vmiSpec.Domain.Memory.Guest = &instancetypeMemory
// If memory overcommit has been requested, set the memory requests to be
// lower than the guest memory by the requested percent.
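// For example, with 4Gi of guest memory and OvercommitPercent set to 25,
// the pod memory request becomes 3Gi while the guest still sees 4Gi.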
const totalPercentage = 100
if instancetypeOverCommit := instancetypeSpec.Memory.OvercommitPercent; instancetypeOverCommit > 0 {
if vmiSpec.Domain.Resources.Requests == nil {
vmiSpec.Domain.Resources.Requests = k8sv1.ResourceList{}
}
podRequestedMemory := int64(float32(instancetypeMemory.Value()) * (1 - float32(instancetypeOverCommit)/totalPercentage))
vmiSpec.Domain.Resources.Requests[k8sv1.ResourceMemory] = *resource.NewQuantity(podRequestedMemory, instancetypeMemory.Format)
}
if instancetypeSpec.Memory.Hugepages != nil {
vmiSpec.Domain.Memory.Hugepages = instancetypeSpec.Memory.Hugepages.DeepCopy()
}
if instancetypeSpec.Memory.MaxGuest != nil {
m := instancetypeSpec.Memory.MaxGuest.DeepCopy()
vmiSpec.Domain.Memory.MaxGuest = &m
}
return nil
}
func validateMemory(
baseConflict *conflict.Conflict,
instancetypeSpec *v1beta1.VirtualMachineInstancetypeSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
) (conflicts conflict.Conflicts) {
if vmiSpec.Domain.Memory.Guest != nil {
return conflict.Conflicts{baseConflict.NewChild("domain", "memory", "guest")}
}
if vmiSpec.Domain.Memory.Hugepages != nil && instancetypeSpec.Memory.Hugepages != nil {
return conflict.Conflicts{baseConflict.NewChild("domain", "memory", "hugepages")}
}
if vmiSpec.Domain.Memory.MaxGuest != nil && instancetypeSpec.Memory.MaxGuest != nil {
return conflict.Conflicts{baseConflict.NewChild("domain", "memory", "maxGuest")}
}
if _, hasMemoryRequests := vmiSpec.Domain.Resources.Requests[k8sv1.ResourceMemory]; hasMemoryRequests {
return conflict.Conflicts{baseConflict.NewChild("domain", "resources", "requests", string(k8sv1.ResourceMemory))}
}
if _, hasMemoryLimits := vmiSpec.Domain.Resources.Limits[k8sv1.ResourceMemory]; hasMemoryLimits {
return conflict.Conflicts{baseConflict.NewChild("domain", "resources", "limits", string(k8sv1.ResourceMemory))}
}
return conflicts
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
"maps"
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
)
func applyNodeSelector(
baseConflict *conflict.Conflict,
instancetypeSpec *v1beta1.VirtualMachineInstancetypeSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
) conflict.Conflicts {
if instancetypeSpec.NodeSelector == nil {
return nil
}
if vmiSpec.NodeSelector != nil {
return conflict.Conflicts{baseConflict.NewChild("nodeSelector")}
}
vmiSpec.NodeSelector = maps.Clone(instancetypeSpec.NodeSelector)
return nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
)
func applySchedulerName(
baseConflict *conflict.Conflict,
instancetypeSpec *v1beta1.VirtualMachineInstancetypeSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
) conflict.Conflicts {
if instancetypeSpec.SchedulerName == "" {
return nil
}
if vmiSpec.SchedulerName != "" {
return conflict.Conflicts{baseConflict.NewChild("schedulerName")}
}
vmiSpec.SchedulerName = instancetypeSpec.SchedulerName
return nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
)
type vmiApplyHandler interface {
ApplyToVMI(
field *k8sfield.Path,
instancetypeSpec *v1beta1.VirtualMachineInstancetypeSpec,
preferenceSpec *v1beta1.VirtualMachinePreferenceSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
vmiMetadata *metav1.ObjectMeta,
) (conflicts conflict.Conflicts)
}
type specFinder interface {
Find(*virtv1.VirtualMachine) (*v1beta1.VirtualMachineInstancetypeSpec, error)
}
type preferenceSpecFinder interface {
FindPreference(*virtv1.VirtualMachine) (*v1beta1.VirtualMachinePreferenceSpec, error)
}
type vmApplier struct {
vmiApplyHandler
specFinder
preferenceSpecFinder
}
func NewVMApplier(instancetypeFinder specFinder, preferenceFinder preferenceSpecFinder) *vmApplier {
return &vmApplier{
vmiApplyHandler: NewVMIApplier(),
specFinder: instancetypeFinder,
preferenceSpecFinder: preferenceFinder,
}
}
func (a *vmApplier) ApplyToVM(vm *virtv1.VirtualMachine) error {
if vm.Spec.Instancetype == nil && vm.Spec.Preference == nil {
return nil
}
instancetypeSpec, err := a.Find(vm)
if err != nil {
return err
}
preferenceSpec, err := a.FindPreference(vm)
if err != nil {
return err
}
if conflicts := a.ApplyToVMI(
k8sfield.NewPath("spec"),
instancetypeSpec,
preferenceSpec,
&vm.Spec.Template.Spec,
&vm.Spec.Template.ObjectMeta,
); len(conflicts) > 0 {
return fmt.Errorf("VM conflicts with instancetype spec in fields: [%s]", conflicts.String())
}
return nil
}
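// A minimal usage sketch, assuming finder and prefFinder implement the
// specFinder and preferenceSpecFinder interfaces above:
//
//	applier := NewVMApplier(finder, prefFinder)
//	if err := applier.ApplyToVM(vm); err != nil {
//		// the VM spec conflicts with the resolved instance type
//	}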
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
preferenceApply "kubevirt.io/kubevirt/pkg/instancetype/preference/apply"
)
type preferenceApplier interface {
Apply(*v1beta1.VirtualMachinePreferenceSpec, *virtv1.VirtualMachineInstanceSpec, *metav1.ObjectMeta)
}
type vmiApplier struct {
preferenceApplier preferenceApplier
}
func NewVMIApplier() *vmiApplier {
return &vmiApplier{
preferenceApplier: preferenceApply.New(),
}
}
func (a *vmiApplier) ApplyToVMI(
field *k8sfield.Path,
instancetypeSpec *v1beta1.VirtualMachineInstancetypeSpec,
preferenceSpec *v1beta1.VirtualMachinePreferenceSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
vmiMetadata *metav1.ObjectMeta,
) conflict.Conflicts {
if instancetypeSpec == nil && preferenceSpec == nil {
return nil
}
if instancetypeSpec != nil {
baseConflict := conflict.NewFromPath(field)
conflicts := conflict.Conflicts{}
conflicts = append(conflicts, applyNodeSelector(baseConflict, instancetypeSpec, vmiSpec)...)
conflicts = append(conflicts, applySchedulerName(baseConflict, instancetypeSpec, vmiSpec)...)
conflicts = append(conflicts, applyCPU(baseConflict, instancetypeSpec, preferenceSpec, vmiSpec)...)
conflicts = append(conflicts, applyMemory(baseConflict, instancetypeSpec, vmiSpec)...)
conflicts = append(conflicts, applyIOThreads(baseConflict, instancetypeSpec, vmiSpec)...)
conflicts = append(conflicts, applyIOThreadPolicy(baseConflict, instancetypeSpec, vmiSpec)...)
conflicts = append(conflicts, applyLaunchSecurity(baseConflict, instancetypeSpec, vmiSpec)...)
conflicts = append(conflicts, applyGPUs(baseConflict, instancetypeSpec, vmiSpec)...)
conflicts = append(conflicts, applyHostDevices(baseConflict, instancetypeSpec, vmiSpec)...)
conflicts = append(conflicts, applyInstanceTypeAnnotations(instancetypeSpec.Annotations, vmiMetadata)...)
if len(conflicts) > 0 {
return conflicts
}
}
a.preferenceApplier.Apply(preferenceSpec, vmiSpec, vmiMetadata)
return nil
}
//nolint:lll
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package compatibility
import (
"fmt"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/runtime"
"kubevirt.io/api/instancetype/v1beta1"
generatedscheme "kubevirt.io/client-go/kubevirt/scheme"
)
func GetInstancetypeSpec(revision *appsv1.ControllerRevision) (*v1beta1.VirtualMachineInstancetypeSpec, error) {
if err := Decode(revision); err != nil {
return nil, err
}
switch obj := revision.Data.Object.(type) {
case *v1beta1.VirtualMachineInstancetype:
return &obj.Spec, nil
case *v1beta1.VirtualMachineClusterInstancetype:
return &obj.Spec, nil
default:
return nil, fmt.Errorf("unexpected type in ControllerRevision: %T", obj)
}
}
func GetPreferenceSpec(revision *appsv1.ControllerRevision) (*v1beta1.VirtualMachinePreferenceSpec, error) {
if err := Decode(revision); err != nil {
return nil, err
}
switch obj := revision.Data.Object.(type) {
case *v1beta1.VirtualMachinePreference:
return &obj.Spec, nil
case *v1beta1.VirtualMachineClusterPreference:
return &obj.Spec, nil
default:
return nil, fmt.Errorf("unexpected type in ControllerRevision: %T", obj)
}
}
func Decode(revision *appsv1.ControllerRevision) error {
if len(revision.Data.Raw) == 0 {
return nil
}
return decodeControllerRevisionObject(revision)
}
func decodeControllerRevisionObject(revision *appsv1.ControllerRevision) error {
decodedObj, err := runtime.Decode(generatedscheme.Codecs.UniversalDeserializer(), revision.Data.Raw)
if err != nil {
return fmt.Errorf("failed to decode object in ControllerRevision: %w", err)
}
revision.Data.Object = decodedObj
switch obj := revision.Data.Object.(type) {
case *v1beta1.VirtualMachineInstancetype, *v1beta1.VirtualMachineClusterInstancetype, *v1beta1.VirtualMachinePreference, *v1beta1.VirtualMachineClusterPreference:
return nil
default:
return fmt.Errorf("unexpected type in ControllerRevision: %T", obj)
}
}
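// A typical caller fetches the ControllerRevision referenced by a VM and then
// extracts the stored spec, for example, given a fetched *appsv1.ControllerRevision:
//
//	spec, err := compatibility.GetInstancetypeSpec(revision)
//	if err != nil {
//		return err
//	}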
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package conflict
import (
"fmt"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
)
const conflictsErrorFmt = "VM field(s) %s conflicts with selected instance type"
type Conflict struct {
Message string
k8sfield.Path
}
func New(name string, moreNames ...string) *Conflict {
return &Conflict{
Path: *k8sfield.NewPath(name, moreNames...),
}
}
func NewFromPath(path *k8sfield.Path) *Conflict {
return &Conflict{
Path: *path,
}
}
func NewWithMessage(message, name string, moreNames ...string) *Conflict {
return &Conflict{
Path: *k8sfield.NewPath(name, moreNames...),
Message: message,
}
}
func (c Conflict) NewChild(name string, moreNames ...string) *Conflict {
return &Conflict{
Path: *c.Child(name, moreNames...),
}
}
func (c Conflict) Error() string {
if c.Message != "" {
return c.Message
}
return fmt.Sprintf(conflictsErrorFmt, c.String())
}
func (c Conflict) StatusCause() metav1.StatusCause {
return metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: c.Error(),
Field: c.String(),
}
}
func (c Conflict) StatusCauses() []metav1.StatusCause {
return []metav1.StatusCause{c.StatusCause()}
}
type Conflicts []*Conflict
func (c Conflicts) String() string {
pathStrings := make([]string, 0, len(c))
for _, path := range c {
pathStrings = append(pathStrings, path.String())
}
return strings.Join(pathStrings, ", ")
}
func (c Conflicts) Error() string {
return fmt.Sprintf(conflictsErrorFmt, c.String())
}
func (c Conflicts) StatusCauses() []metav1.StatusCause {
causes := make([]metav1.StatusCause, 0, len(c))
for _, conflict := range c {
causes = append(causes, conflict.StatusCause())
}
return causes
}
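// For example, New("spec", "domain", "cpu", "cores").Error() returns
// "VM field(s) spec.domain.cpu.cores conflicts with selected instance type".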
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package vm
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/instancetype/annotations"
"kubevirt.io/kubevirt/pkg/instancetype/apply"
"kubevirt.io/kubevirt/pkg/instancetype/expand"
"kubevirt.io/kubevirt/pkg/instancetype/find"
preferenceannotations "kubevirt.io/kubevirt/pkg/instancetype/preference/annotations"
preferenceapply "kubevirt.io/kubevirt/pkg/instancetype/preference/apply"
preferencefind "kubevirt.io/kubevirt/pkg/instancetype/preference/find"
"kubevirt.io/kubevirt/pkg/instancetype/revision"
"kubevirt.io/kubevirt/pkg/instancetype/upgrade"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/pkg/virt-controller/watch/common"
)
type applyVMHandler interface {
ApplyToVM(*virtv1.VirtualMachine) error
}
type instancetypeFindHandler interface {
Find(*virtv1.VirtualMachine) (*v1beta1.VirtualMachineInstancetypeSpec, error)
}
type preferenceFindHandler interface {
FindPreference(*virtv1.VirtualMachine) (*v1beta1.VirtualMachinePreferenceSpec, error)
}
type expandHandler interface {
Expand(*virtv1.VirtualMachine) (*virtv1.VirtualMachine, error)
}
type storeHandler interface {
Store(*virtv1.VirtualMachine) error
}
type upgradeHandler interface {
Upgrade(*virtv1.VirtualMachine) error
}
type controller struct {
applyVMHandler
storeHandler
expandHandler
upgradeHandler
instancetypeFindHandler
preferenceFindHandler
clientset kubecli.KubevirtClient
clusterConfig *virtconfig.ClusterConfig
recorder record.EventRecorder
}
func New(
instancetypeStore, clusterInstancetypeStore, preferenceStore, clusterPreferenceStore, revisionStore cache.Store,
virtClient kubecli.KubevirtClient, clusterConfig *virtconfig.ClusterConfig, recorder record.EventRecorder,
) *controller {
finder := find.NewSpecFinder(instancetypeStore, clusterInstancetypeStore, revisionStore, virtClient)
prefFinder := preferencefind.NewSpecFinder(preferenceStore, clusterPreferenceStore, revisionStore, virtClient)
return &controller{
instancetypeFindHandler: finder,
preferenceFindHandler: prefFinder,
applyVMHandler: apply.NewVMApplier(finder, prefFinder),
storeHandler: revision.New(instancetypeStore, clusterInstancetypeStore, preferenceStore, clusterPreferenceStore, virtClient),
expandHandler: expand.New(clusterConfig, finder, prefFinder),
upgradeHandler: upgrade.New(revisionStore, virtClient),
clientset: virtClient,
clusterConfig: clusterConfig,
recorder: recorder,
}
}
const (
storeControllerRevisionErrFmt = "error encountered while storing instancetype.kubevirt.io controllerRevisions: %v"
upgradeControllerRevisionErrFmt = "error encountered while upgrading instancetype.kubevirt.io controllerRevisions: %v"
cleanControllerRevisionErrFmt = "error encountered cleaning controllerRevision %s after successfully expanding VirtualMachine %s: %v"
)
func (c *controller) Sync(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) (*virtv1.VirtualMachine, error) {
if vm.Spec.Instancetype == nil && vm.Spec.Preference == nil {
return vm, nil
}
// Before we sync ensure any referenced resources exist
if syncErr := c.checkResourcesExist(vm); syncErr != nil {
return vm, syncErr
}
referencePolicy := c.clusterConfig.GetInstancetypeReferencePolicy()
switch referencePolicy {
case virtv1.Reference:
// Ensure we have controllerRevisions of any instancetype or preferences referenced by the VM
if err := c.Store(vm); err != nil {
log.Log.Object(vm).Errorf(storeControllerRevisionErrFmt, err)
c.recorder.Eventf(vm, corev1.EventTypeWarning, common.FailedCreateVirtualMachineReason, storeControllerRevisionErrFmt, err)
return vm, common.NewSyncError(fmt.Errorf(storeControllerRevisionErrFmt, err), common.FailedCreateVirtualMachineReason)
}
case virtv1.Expand, virtv1.ExpandAll:
return c.handleExpand(vm, referencePolicy)
}
// If we have controllerRevisions make sure they are fully up to date before proceeding
if err := c.Upgrade(vm); err != nil {
log.Log.Object(vm).Reason(err).Errorf(upgradeControllerRevisionErrFmt, err)
c.recorder.Eventf(vm, corev1.EventTypeWarning, common.FailedCreateVirtualMachineReason, upgradeControllerRevisionErrFmt, err)
return vm, common.NewSyncError(fmt.Errorf(upgradeControllerRevisionErrFmt, err), common.FailedCreateVirtualMachineReason)
}
return vm, nil
}
func (c *controller) checkResourcesExist(vm *virtv1.VirtualMachine) error {
const (
failedFindInstancetype = "FailedFindInstancetype"
failedFindPreference = "FailedFindPreference"
)
if _, err := c.Find(vm); err != nil {
return common.NewSyncError(err, failedFindInstancetype)
}
if _, err := c.FindPreference(vm); err != nil {
return common.NewSyncError(err, failedFindPreference)
}
return nil
}
func (c *controller) handleExpand(
vm *virtv1.VirtualMachine,
referencePolicy virtv1.InstancetypeReferencePolicy,
) (*virtv1.VirtualMachine, error) {
if referencePolicy == virtv1.Expand {
if revision.HasControllerRevisionRef(vm.Status.InstancetypeRef) {
log.Log.Object(vm).Infof("not expanding as instance type already has revisionName")
return vm, nil
}
if revision.HasControllerRevisionRef(vm.Status.PreferenceRef) {
log.Log.Object(vm).Infof("not expanding as preference already has revisionName")
return vm, nil
}
}
expandVMCopy, err := c.Expand(vm)
if err != nil {
return vm, fmt.Errorf("error encountered while expanding into VirtualMachine: %v", err)
}
// Only update the VM if we have changed something by applying an instance type and preference
if !equality.Semantic.DeepEqual(vm, expandVMCopy) {
updatedVM, err := c.clientset.VirtualMachine(expandVMCopy.Namespace).Update(
context.Background(), expandVMCopy, metav1.UpdateOptions{})
if err != nil {
return vm, fmt.Errorf("error encountered when trying to update expanded VirtualMachine: %v", err)
}
updatedVM.Status = expandVMCopy.Status
updatedVM, err = c.clientset.VirtualMachine(updatedVM.Namespace).UpdateStatus(
context.Background(), updatedVM, metav1.UpdateOptions{})
if err != nil {
return vm, fmt.Errorf("error encountered when trying to update expanded VirtualMachine Status: %v", err)
}
// We should clean up any instance type or preference controllerRevisions after successfully expanding the VM
if revision.HasControllerRevisionRef(vm.Status.InstancetypeRef) {
if err = c.clientset.AppsV1().ControllerRevisions(vm.Namespace).Delete(
context.Background(), vm.Status.InstancetypeRef.ControllerRevisionRef.Name, metav1.DeleteOptions{}); err != nil {
return nil, common.NewSyncError(
fmt.Errorf(cleanControllerRevisionErrFmt, vm.Status.InstancetypeRef.ControllerRevisionRef.Name, vm.Name, err),
common.FailedCreateVirtualMachineReason,
)
}
}
if revision.HasControllerRevisionRef(vm.Status.PreferenceRef) {
if err = c.clientset.AppsV1().ControllerRevisions(vm.Namespace).Delete(
context.Background(), vm.Status.PreferenceRef.ControllerRevisionRef.Name, metav1.DeleteOptions{}); err != nil {
return nil, common.NewSyncError(
fmt.Errorf(cleanControllerRevisionErrFmt, vm.Status.PreferenceRef.ControllerRevisionRef.Name, vm.Name, err),
common.FailedCreateVirtualMachineReason,
)
}
}
return updatedVM, nil
}
return vm, nil
}
func (c *controller) ApplyAutoAttachPreferences(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) error {
if vm.Spec.Preference == nil {
return nil
}
preferenceSpec, err := c.FindPreference(vm)
if err != nil {
return err
}
preferenceapply.ApplyAutoAttachPreferences(preferenceSpec, &vmi.Spec)
return nil
}
func (c *controller) ApplyToVMI(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) error {
instancetypeSpec, err := c.Find(vm)
if err != nil {
return err
}
preferenceSpec, err := c.FindPreference(vm)
if err != nil {
return err
}
if instancetypeSpec == nil && preferenceSpec == nil {
return nil
}
annotations.Set(vm, vmi)
preferenceannotations.Set(vm, vmi)
if conflicts := apply.NewVMIApplier().ApplyToVMI(
k8sfield.NewPath("spec"),
instancetypeSpec,
preferenceSpec,
&vmi.Spec,
&vmi.ObjectMeta,
); len(conflicts) > 0 {
return fmt.Errorf("VMI conflicts with instancetype spec in fields: [%s]", conflicts.String())
}
return nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*/
package vm
import (
virtv1 "kubevirt.io/api/core/v1"
)
type controllerStub struct {
syncFunc func(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) (*virtv1.VirtualMachine, error)
applyToVMFunc func(*virtv1.VirtualMachine) error
applyToVMIFunc func(*virtv1.VirtualMachine, *virtv1.VirtualMachineInstance) error
applyAutoAttachPreferencesFunc func(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) error
}
func NewControllerStub() *controllerStub {
return &controllerStub{
syncFunc: func(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) (*virtv1.VirtualMachine, error) {
return vm, nil
},
applyToVMFunc: func(*virtv1.VirtualMachine) error {
return nil
},
applyToVMIFunc: func(*virtv1.VirtualMachine, *virtv1.VirtualMachineInstance) error {
return nil
},
applyAutoAttachPreferencesFunc: func(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) error {
return nil
},
}
}
func (m *controllerStub) ApplyToVM(vm *virtv1.VirtualMachine) error {
return m.applyToVMFunc(vm)
}
func (m *controllerStub) ApplyToVMI(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) error {
return m.applyToVMIFunc(vm, vmi)
}
func (m *controllerStub) Sync(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) (*virtv1.VirtualMachine, error) {
return m.syncFunc(vm, vmi)
}
func (m *controllerStub) ApplyAutoAttachPreferences(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) error {
return m.applyAutoAttachPreferencesFunc(vm, vmi)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package expand
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/defaults"
"kubevirt.io/kubevirt/pkg/instancetype/apply"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
"kubevirt.io/kubevirt/pkg/network/vmispec"
utils "kubevirt.io/kubevirt/pkg/util"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
type vmiApplier interface {
ApplyToVMI(
field *k8sfield.Path,
instancetypeSpec *v1beta1.VirtualMachineInstancetypeSpec,
preferenceSpec *v1beta1.VirtualMachinePreferenceSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
vmiMetadata *metav1.ObjectMeta,
) (conflicts conflict.Conflicts)
}
type specFinder interface {
Find(*virtv1.VirtualMachine) (*v1beta1.VirtualMachineInstancetypeSpec, error)
}
type preferenceSpecFinder interface {
FindPreference(*virtv1.VirtualMachine) (*v1beta1.VirtualMachinePreferenceSpec, error)
}
type expander struct {
vmiApplier
specFinder
preferenceSpecFinder
clusterConfig *virtconfig.ClusterConfig
}
func New(
clusterConfig *virtconfig.ClusterConfig,
instancetypeFinder specFinder,
preferenceFinder preferenceSpecFinder,
) *expander {
return &expander{
clusterConfig: clusterConfig,
vmiApplier: apply.NewVMIApplier(),
specFinder: instancetypeFinder,
preferenceSpecFinder: preferenceFinder,
}
}
func (e *expander) Expand(vm *virtv1.VirtualMachine) (*virtv1.VirtualMachine, error) {
if vm.Spec.Instancetype == nil && vm.Spec.Preference == nil {
return vm, nil
}
instancetypeSpec, err := e.Find(vm)
if err != nil {
return nil, err
}
preferenceSpec, err := e.FindPreference(vm)
if err != nil {
return nil, err
}
expandedVM := vm.DeepCopy()
utils.SetDefaultVolumeDisk(&expandedVM.Spec.Template.Spec)
if err := vmispec.SetDefaultNetworkInterface(e.clusterConfig, &expandedVM.Spec.Template.Spec); err != nil {
return nil, err
}
// Replace with VMApplier.ApplyToVM once conflict errors are aligned
conflicts := e.ApplyToVMI(
k8sfield.NewPath("spec", "template", "spec"),
instancetypeSpec, preferenceSpec,
&expandedVM.Spec.Template.Spec,
&expandedVM.Spec.Template.ObjectMeta,
)
if len(conflicts) > 0 {
return nil, conflicts
}
// Apply defaults to VM.Spec.Template.Spec after applying instance types to ensure we don't conflict
if err := defaults.SetDefaultVirtualMachineInstanceSpec(e.clusterConfig, &expandedVM.Spec.Template.Spec); err != nil {
return nil, err
}
// Remove {Instancetype,Preference}Matcher and {Instancetype,Preference}Ref
expandedVM.Spec.Instancetype = nil
expandedVM.Status.InstancetypeRef = nil
expandedVM.Spec.Preference = nil
expandedVM.Status.PreferenceRef = nil
return expandedVM, nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package find
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/client-go/kubecli"
)
type clusterInstancetypeFinder struct {
store cache.Store
virtClient kubecli.KubevirtClient
}
func NewClusterInstancetypeFinder(store cache.Store, virtClient kubecli.KubevirtClient) *clusterInstancetypeFinder {
return &clusterInstancetypeFinder{
store: store,
virtClient: virtClient,
}
}
func (f *clusterInstancetypeFinder) Find(vm *virtv1.VirtualMachine) (*v1beta1.VirtualMachineClusterInstancetype, error) {
if vm.Spec.Instancetype == nil {
return nil, nil
}
if f.store == nil {
return f.virtClient.VirtualMachineClusterInstancetype().Get(
context.Background(), vm.Spec.Instancetype.Name, metav1.GetOptions{})
}
obj, exists, err := f.store.GetByKey(vm.Spec.Instancetype.Name)
if err != nil {
return nil, err
}
if !exists {
return f.virtClient.VirtualMachineClusterInstancetype().Get(
context.Background(), vm.Spec.Instancetype.Name, metav1.GetOptions{})
}
instancetype, ok := obj.(*v1beta1.VirtualMachineClusterInstancetype)
if !ok {
return nil, fmt.Errorf("unknown object type found in VirtualMachineClusterInstancetype informer")
}
return instancetype, nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package find
import (
"context"
"fmt"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
"kubevirt.io/client-go/kubecli"
)
type controllerRevisionFinder struct {
store cache.Store
virtClient kubecli.KubevirtClient
}
func NewControllerRevisionFinder(store cache.Store, virtClient kubecli.KubevirtClient) *controllerRevisionFinder {
return &controllerRevisionFinder{
store: store,
virtClient: virtClient,
}
}
func (f *controllerRevisionFinder) Find(namespacedName types.NamespacedName) (*appsv1.ControllerRevision, error) {
if f.store == nil {
return f.virtClient.AppsV1().ControllerRevisions(namespacedName.Namespace).Get(
context.Background(), namespacedName.Name, metav1.GetOptions{})
}
obj, exists, err := f.store.GetByKey(namespacedName.String())
if err != nil {
return nil, err
}
if !exists {
return f.virtClient.AppsV1().ControllerRevisions(namespacedName.Namespace).Get(
context.Background(), namespacedName.Name, metav1.GetOptions{})
}
revision, ok := obj.(*appsv1.ControllerRevision)
if !ok {
return nil, fmt.Errorf("unknown object type found in ControllerRevision informer")
}
return revision, nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package find
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/client-go/kubecli"
)
type instancetypeFinder struct {
store cache.Store
virtClient kubecli.KubevirtClient
}
func NewInstancetypeFinder(store cache.Store, virtClient kubecli.KubevirtClient) *instancetypeFinder {
return &instancetypeFinder{
store: store,
virtClient: virtClient,
}
}
func (f *instancetypeFinder) Find(vm *virtv1.VirtualMachine) (*v1beta1.VirtualMachineInstancetype, error) {
if vm.Spec.Instancetype == nil {
return nil, nil
}
namespacedName := types.NamespacedName{
Namespace: vm.Namespace,
Name: vm.Spec.Instancetype.Name,
}
if f.store == nil {
return f.virtClient.VirtualMachineInstancetype(namespacedName.Namespace).Get(
context.Background(), namespacedName.Name, metav1.GetOptions{})
}
obj, exists, err := f.store.GetByKey(namespacedName.String())
if err != nil {
return nil, err
}
if !exists {
return f.virtClient.VirtualMachineInstancetype(namespacedName.Namespace).Get(
context.Background(), namespacedName.Name, metav1.GetOptions{})
}
instancetype, ok := obj.(*v1beta1.VirtualMachineInstancetype)
if !ok {
return nil, fmt.Errorf("unknown object type found in VirtualMachineInstancetype informer")
}
return instancetype, nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package find
import (
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
virtv1 "kubevirt.io/api/core/v1"
instancetypeapi "kubevirt.io/api/instancetype"
"kubevirt.io/client-go/kubecli"
)
type revisionFinder struct {
controllerRevisionFinder *controllerRevisionFinder
}
func NewRevisionFinder(store cache.Store, virtClient kubecli.KubevirtClient) *revisionFinder {
return &revisionFinder{
controllerRevisionFinder: NewControllerRevisionFinder(store, virtClient),
}
}
func (f *revisionFinder) Find(vm *virtv1.VirtualMachine) (*appsv1.ControllerRevision, error) {
// Avoid a race with Store() here by preferring RevisionName, when already provided, over whatever is in ControllerRevisionRef
if vm.Spec.Instancetype != nil && vm.Spec.Instancetype.RevisionName != "" {
return f.controllerRevisionFinder.Find(types.NamespacedName{
Namespace: vm.Namespace,
Name: vm.Spec.Instancetype.RevisionName,
})
}
ref := vm.Status.InstancetypeRef
if ref != nil && ref.ControllerRevisionRef != nil && ref.ControllerRevisionRef.Name != "" {
cr, err := f.controllerRevisionFinder.Find(types.NamespacedName{
Namespace: vm.Namespace,
Name: ref.ControllerRevisionRef.Name,
})
if err != nil {
return nil, err
}
// Only return the found CR if it is for the referenced instance type
if label, ok := cr.Labels[instancetypeapi.ControllerRevisionObjectNameLabel]; ok && label == vm.Spec.Instancetype.Name {
return cr, nil
}
}
return nil, nil
}
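// A sketch of the precedence implemented by Find above (values are
// illustrative):
//
//	vm.Spec.Instancetype = &virtv1.InstancetypeMatcher{
//		Name:         "small",
//		RevisionName: "vm-small-v1",
//	}
//	cr, err := NewRevisionFinder(revisionStore, virtClient).Find(vm)
//	// cr is the "vm-small-v1" ControllerRevision; vm.Status.InstancetypeRef
//	// is only consulted when the matcher carries no RevisionName.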
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package find
import (
"fmt"
"strings"
"k8s.io/client-go/tools/cache"
virtv1 "kubevirt.io/api/core/v1"
api "kubevirt.io/api/instancetype"
"kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/kubevirt/pkg/instancetype/compatibility"
)
type specFinder struct {
instancetypeFinder *instancetypeFinder
clusterInstancetypeFinder *clusterInstancetypeFinder
revisionFinder *revisionFinder
}
func NewSpecFinder(store, clusterStore, revisionStore cache.Store, virtClient kubecli.KubevirtClient) *specFinder {
return &specFinder{
instancetypeFinder: NewInstancetypeFinder(store, virtClient),
clusterInstancetypeFinder: NewClusterInstancetypeFinder(clusterStore, virtClient),
revisionFinder: NewRevisionFinder(revisionStore, virtClient),
}
}
const unexpectedKindFmt = "got unexpected kind in InstancetypeMatcher: %s"
func (f *specFinder) Find(vm *virtv1.VirtualMachine) (*v1beta1.VirtualMachineInstancetypeSpec, error) {
if vm.Spec.Instancetype == nil {
return nil, nil
}
revision, err := f.revisionFinder.Find(vm)
if err != nil {
return nil, err
}
if revision != nil {
return compatibility.GetInstancetypeSpec(revision)
}
switch strings.ToLower(vm.Spec.Instancetype.Kind) {
case api.SingularResourceName, api.PluralResourceName:
instancetype, err := f.instancetypeFinder.Find(vm)
if err != nil {
return nil, err
}
return &instancetype.Spec, nil
case api.ClusterSingularResourceName, api.ClusterPluralResourceName, "":
clusterInstancetype, err := f.clusterInstancetypeFinder.Find(vm)
if err != nil {
return nil, err
}
return &clusterInstancetype.Spec, nil
default:
return nil, fmt.Errorf(unexpectedKindFmt, vm.Spec.Instancetype.Kind)
}
}
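// A caller-side sketch of the resolution order in Find above: a stashed
// ControllerRevision wins, otherwise the matcher Kind selects the namespaced
// or cluster-scoped finder, with an empty Kind treated as cluster-scoped:
//
//	spec, err := NewSpecFinder(store, clusterStore, revisionStore, virtClient).Find(vm)
//	if err != nil {
//		return err
//	}
//	// spec is nil when the VM has no InstancetypeMatcher at all.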
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package infer
type IgnoreableInferenceError struct {
err error
}
func (e *IgnoreableInferenceError) Error() string {
return e.err.Error()
}
func (e *IgnoreableInferenceError) Unwrap() error {
return e.err
}
func NewIgnoreableInferenceError(err error) error {
return &IgnoreableInferenceError{err: err}
}
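// Because the type wraps the original error and implements Unwrap, callers can
// detect it with errors.As while still seeing the underlying cause:
//
//	err := NewIgnoreableInferenceError(fmt.Errorf("no labels on volume"))
//	var ignorableErr *IgnoreableInferenceError
//	if errors.As(err, &ignorableErr) {
//		// inference failed in a way the caller may choose to ignore
//	}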
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package infer
import (
"errors"
virtv1 "kubevirt.io/api/core/v1"
api "kubevirt.io/api/instancetype"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
)
const logVerbosityLevel = 3
type handler struct {
virtClient kubecli.KubevirtClient
}
func New(virtClient kubecli.KubevirtClient) *handler {
return &handler{
virtClient: virtClient,
}
}
func shouldIgnoreFailure(ignoreFailurePolicy *virtv1.InferFromVolumeFailurePolicy) bool {
return ignoreFailurePolicy != nil && *ignoreFailurePolicy == virtv1.IgnoreInferFromVolumeFailure
}
func (h *handler) Infer(vm *virtv1.VirtualMachine) error {
if err := h.Instancetype(vm); err != nil {
return err
}
if err := h.Preference(vm); err != nil {
return err
}
return nil
}
func (h *handler) Instancetype(vm *virtv1.VirtualMachine) error {
if vm.Spec.Instancetype == nil {
return nil
}
// Leave matcher unchanged when inference is disabled
if vm.Spec.Instancetype.InferFromVolume == "" {
return nil
}
ignoreFailure := shouldIgnoreFailure(vm.Spec.Instancetype.InferFromVolumeFailurePolicy)
defaultName, defaultKind, err := h.fromVolumes(
vm, vm.Spec.Instancetype.InferFromVolume, api.DefaultInstancetypeLabel, api.DefaultInstancetypeKindLabel)
if err != nil {
var ignoreableInferenceErr *IgnoreableInferenceError
if errors.As(err, &ignoreableInferenceErr) && ignoreFailure {
log.Log.Object(vm).V(logVerbosityLevel).Info("Ignored error during inference of instancetype, clearing matcher.")
vm.Spec.Instancetype = nil
return nil
}
return err
}
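// Clearing the template memory here presumably avoids a conflict with the
// guest memory the inferred instance type will now provide.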
if ignoreFailure {
vm.Spec.Template.Spec.Domain.Memory = nil
}
vm.Spec.Instancetype = &virtv1.InstancetypeMatcher{
Name: defaultName,
Kind: defaultKind,
}
return nil
}
func (h *handler) Preference(vm *virtv1.VirtualMachine) error {
if vm.Spec.Preference == nil {
return nil
}
// Leave matcher unchanged when inference is disabled
if vm.Spec.Preference.InferFromVolume == "" {
return nil
}
ignoreFailure := shouldIgnoreFailure(vm.Spec.Preference.InferFromVolumeFailurePolicy)
defaultName, defaultKind, err := h.fromVolumes(
vm, vm.Spec.Preference.InferFromVolume, api.DefaultPreferenceLabel, api.DefaultPreferenceKindLabel)
if err != nil {
var ignoreableInferenceErr *IgnoreableInferenceError
if errors.As(err, &ignoreableInferenceErr) && ignoreFailure {
log.Log.Object(vm).V(logVerbosityLevel).Info("Ignored error during inference of preference, clearing matcher.")
vm.Spec.Preference = nil
return nil
}
return err
}
vm.Spec.Preference = &virtv1.PreferenceMatcher{
Name: defaultName,
Kind: defaultKind,
}
return nil
}
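// A minimal caller-side sketch (the client and VM are assumed to be supplied
// elsewhere):
//
//	if err := New(virtClient).Infer(vm); err != nil {
//		return err
//	}
//	// On success any InferFromVolume matchers have been replaced with concrete
//	// Name/Kind matchers, or cleared entirely when the failure policy is
//	// IgnoreInferFromVolumeFailure and the failure was ignorable.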
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package infer
import (
"context"
"errors"
"fmt"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cdiv1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
virtv1 "kubevirt.io/api/core/v1"
)
const (
unsupportedVolumeTypeFmt = "unable to infer defaults from volume %s as type is not supported"
missingLabelFmt = "unable to find required %s label on the volume"
unsupportedDataVolumeSource = "unable to infer defaults from DataVolumeSpec as DataVolumeSource is not supported"
missingDataVolumeSourcePVC = "unable to infer defaults from DataSource that doesn't provide DataVolumeSourcePVC"
unsupportedDataVolumeSourceRefFmt = "unable to infer defaults from DataVolumeSourceRef as Kind %s is not supported"
)
/*
Defaults will be inferred from the following combinations of DataVolumeSources, DataVolumeTemplates, DataSources and PVCs:
Volume -> PersistentVolumeClaimVolumeSource -> PersistentVolumeClaim
Volume -> DataVolumeSource -> DataVolume
Volume -> DataVolumeSource -> DataVolumeSourcePVC -> PersistentVolumeClaim
Volume -> DataVolumeSource -> DataVolumeSourceRef -> DataSource
Volume -> DataVolumeSource -> DataVolumeSourceRef -> DataSource -> PersistentVolumeClaim
Volume -> DataVolumeSource -> DataVolumeTemplate -> DataVolumeSourcePVC -> PersistentVolumeClaim
Volume -> DataVolumeSource -> DataVolumeTemplate -> DataVolumeSourceRef -> DataSource
Volume -> DataVolumeSource -> DataVolumeTemplate -> DataVolumeSourceRef -> DataSource -> PersistentVolumeClaim
*/
func (h *handler) fromVolumes(
vm *virtv1.VirtualMachine, inferFromVolumeName, defaultNameLabel, defaultKindLabel string,
) (defaultName, defaultKind string, err error) {
for _, volume := range vm.Spec.Template.Spec.Volumes {
if volume.Name != inferFromVolumeName {
continue
}
if volume.PersistentVolumeClaim != nil {
return h.fromPVC(volume.PersistentVolumeClaim.ClaimName, vm.Namespace, defaultNameLabel, defaultKindLabel)
}
if volume.DataVolume != nil {
return h.fromDataVolume(vm, volume.DataVolume.Name, defaultNameLabel, defaultKindLabel)
}
return "", "", NewIgnoreableInferenceError(fmt.Errorf(unsupportedVolumeTypeFmt, inferFromVolumeName))
}
return "", "", fmt.Errorf("unable to find volume %s to infer defaults", inferFromVolumeName)
}
func fromLabels(labels map[string]string, defaultNameLabel, defaultKindLabel string) (defaultName, defaultKind string, err error) {
defaultName, hasLabel := labels[defaultNameLabel]
if !hasLabel {
return "", "", NewIgnoreableInferenceError(fmt.Errorf(missingLabelFmt, defaultNameLabel))
}
return defaultName, labels[defaultKindLabel], nil
}
func (h *handler) fromPVC(pvcName, pvcNamespace, defaultNameLabel, defaultKindLabel string) (defaultName, defaultKind string, err error) {
pvc, err := h.virtClient.CoreV1().PersistentVolumeClaims(pvcNamespace).Get(context.Background(), pvcName, metav1.GetOptions{})
if err != nil {
return "", "", err
}
return fromLabels(pvc.Labels, defaultNameLabel, defaultKindLabel)
}
func (h *handler) fromDataVolume(
vm *virtv1.VirtualMachine, dvName, defaultNameLabel, defaultKindLabel string,
) (defaultName, defaultKind string, err error) {
if len(vm.Spec.DataVolumeTemplates) > 0 {
for _, dvt := range vm.Spec.DataVolumeTemplates {
if dvt.Name != dvName {
continue
}
dvtSpec := dvt.Spec
return h.fromDataVolumeSpec(&dvtSpec, defaultNameLabel, defaultKindLabel, vm.Namespace)
}
}
dv, err := h.virtClient.CdiClient().CdiV1beta1().DataVolumes(vm.Namespace).Get(context.Background(), dvName, metav1.GetOptions{})
if err != nil {
// Handle garbage collected DataVolumes by attempting to look up the PVC using the name of the DataVolume in the VM namespace
if k8serrors.IsNotFound(err) {
return h.fromPVC(dvName, vm.Namespace, defaultNameLabel, defaultKindLabel)
}
return "", "", err
}
// Check the DataVolume for any labels before checking the underlying PVC
defaultName, defaultKind, err = fromLabels(dv.Labels, defaultNameLabel, defaultKindLabel)
if err == nil {
return defaultName, defaultKind, nil
}
return h.fromDataVolumeSpec(&dv.Spec, defaultNameLabel, defaultKindLabel, vm.Namespace)
}
func (h *handler) fromDataVolumeSpec(
dataVolumeSpec *cdiv1beta1.DataVolumeSpec, defaultNameLabel, defaultKindLabel, vmNameSpace string,
) (defaultName, defaultKind string, err error) {
if dataVolumeSpec != nil && dataVolumeSpec.Source != nil && dataVolumeSpec.Source.PVC != nil {
return h.fromPVC(dataVolumeSpec.Source.PVC.Name, dataVolumeSpec.Source.PVC.Namespace, defaultNameLabel, defaultKindLabel)
}
if dataVolumeSpec != nil && dataVolumeSpec.SourceRef != nil {
return h.fromDataVolumeSourceRef(dataVolumeSpec.SourceRef, defaultNameLabel, defaultKindLabel, vmNameSpace)
}
return "", "", NewIgnoreableInferenceError(errors.New(unsupportedDataVolumeSource))
}
func (h *handler) fromDataSource(
dataSourceName, dataSourceNamespace, defaultNameLabel, defaultKindLabel string,
) (defaultName, defaultKind string, err error) {
ds, err := h.virtClient.CdiClient().CdiV1beta1().DataSources(dataSourceNamespace).Get(
context.Background(), dataSourceName, metav1.GetOptions{})
if err != nil {
return "", "", err
}
// Check the DataSource for any labels before checking the underlying PVC
defaultName, defaultKind, err = fromLabels(ds.Labels, defaultNameLabel, defaultKindLabel)
if err == nil {
return defaultName, defaultKind, nil
}
if ds.Spec.Source.PVC != nil {
return h.fromPVC(ds.Spec.Source.PVC.Name, ds.Spec.Source.PVC.Namespace, defaultNameLabel, defaultKindLabel)
}
return "", "", NewIgnoreableInferenceError(errors.New(missingDataVolumeSourcePVC))
}
func (h *handler) fromDataVolumeSourceRef(
sourceRef *cdiv1beta1.DataVolumeSourceRef, defaultNameLabel, defaultKindLabel, vmNameSpace string,
) (defaultName, defaultKind string, err error) {
if sourceRef.Kind == "DataSource" {
// The namespace can be left blank here with the assumption that the DataSource is in the same namespace as the VM
namespace := vmNameSpace
if sourceRef.Namespace != nil {
namespace = *sourceRef.Namespace
}
return h.fromDataSource(sourceRef.Name, namespace, defaultNameLabel, defaultKindLabel)
}
return "", "", NewIgnoreableInferenceError(fmt.Errorf(unsupportedDataVolumeSourceRefFmt, sourceRef.Kind))
}
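// A sketch tracing one of the chains documented above, Volume ->
// DataVolumeSource -> DataVolumeSourceRef -> DataSource (names and label
// values are illustrative assumptions):
//
//	VM volume "rootdisk" references DataVolume "fedora-dv", whose spec carries
//	a SourceRef of Kind "DataSource" pointing at DataSource "fedora". The call
//	chain fromDataVolume -> fromDataVolumeSpec -> fromDataVolumeSourceRef ->
//	fromDataSource then reads the defaults from the DataSource labels, e.g.:
//
//		instancetype.kubevirt.io/default-instancetype: u1.medium
//		instancetype.kubevirt.io/default-instancetype-kind: VirtualMachineClusterInstancetype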
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package annotations
import (
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
virtv1 "kubevirt.io/api/core/v1"
api "kubevirt.io/api/instancetype"
)
func Set(vm *virtv1.VirtualMachine, target metav1.Object) {
if vm.Spec.Preference == nil {
return
}
if target.GetAnnotations() == nil {
target.SetAnnotations(make(map[string]string))
}
switch strings.ToLower(vm.Spec.Preference.Kind) {
case api.PluralPreferenceResourceName, api.SingularPreferenceResourceName:
target.GetAnnotations()[virtv1.PreferenceAnnotation] = vm.Spec.Preference.Name
case "", api.ClusterPluralPreferenceResourceName, api.ClusterSingularPreferenceResourceName:
target.GetAnnotations()[virtv1.ClusterPreferenceAnnotation] = vm.Spec.Preference.Name
}
}
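// A caller-side example (illustrative VM and target): a matcher with an empty
// Kind or one of the cluster Kinds sets the cluster annotation:
//
//	vm.Spec.Preference = &virtv1.PreferenceMatcher{Name: "fedora"}
//	Set(vm, &vmi.ObjectMeta)
//	// vmi.Annotations[virtv1.ClusterPreferenceAnnotation] == "fedora"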
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
func applyPreferenceAnnotations(annotations map[string]string, target metav1.Object) {
if target.GetAnnotations() == nil {
target.SetAnnotations(make(map[string]string))
}
targetAnnotations := target.GetAnnotations()
for key, value := range annotations {
if _, exists := targetAnnotations[key]; exists {
continue
}
targetAnnotations[key] = value
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
)
func ApplyArchitecturePreferences(preferenceSpec *v1beta1.VirtualMachinePreferenceSpec, vmiSpec *virtv1.VirtualMachineInstanceSpec) {
if preferenceSpec == nil || preferenceSpec.PreferredArchitecture == nil || *preferenceSpec.PreferredArchitecture == "" {
return
}
if vmiSpec.Architecture == "" {
vmiSpec.Architecture = *preferenceSpec.PreferredArchitecture
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
)
func applyClockPreferences(preferenceSpec *v1beta1.VirtualMachinePreferenceSpec, vmiSpec *virtv1.VirtualMachineInstanceSpec) {
if preferenceSpec.Clock == nil {
return
}
if vmiSpec.Domain.Clock == nil {
vmiSpec.Domain.Clock = &virtv1.Clock{}
}
// We don't want to allow a partial overwrite here, so only replace the clock offset when nothing is already set
if preferenceSpec.Clock.PreferredClockOffset != nil &&
vmiSpec.Domain.Clock.ClockOffset.UTC == nil &&
vmiSpec.Domain.Clock.ClockOffset.Timezone == nil {
vmiSpec.Domain.Clock.ClockOffset = *preferenceSpec.Clock.PreferredClockOffset.DeepCopy()
}
if preferenceSpec.Clock.PreferredTimer != nil && vmiSpec.Domain.Clock.Timer == nil {
vmiSpec.Domain.Clock.Timer = preferenceSpec.Clock.PreferredTimer.DeepCopy()
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
)
func applyCPUPreferences(preferenceSpec *v1beta1.VirtualMachinePreferenceSpec, vmiSpec *virtv1.VirtualMachineInstanceSpec) {
if preferenceSpec.CPU == nil || len(preferenceSpec.CPU.PreferredCPUFeatures) == 0 {
return
}
// Only apply a preferred CPU feature when the user has not already provided the same feature
cpuFeatureNames := make(map[string]struct{})
for _, cpuFeature := range vmiSpec.Domain.CPU.Features {
cpuFeatureNames[cpuFeature.Name] = struct{}{}
}
for _, preferredCPUFeature := range preferenceSpec.CPU.PreferredCPUFeatures {
if _, foundCPUFeature := cpuFeatureNames[preferredCPUFeature.Name]; !foundCPUFeature {
vmiSpec.Domain.CPU.Features = append(vmiSpec.Domain.CPU.Features, preferredCPUFeature)
}
}
}
func GetPreferredTopology(preferenceSpec *v1beta1.VirtualMachinePreferenceSpec) v1beta1.PreferredCPUTopology {
// Default to Sockets when a PreferredCPUTopology isn't provided
preferredTopology := v1beta1.Sockets
if preferenceSpec != nil && preferenceSpec.CPU != nil && preferenceSpec.CPU.PreferredCPUTopology != nil {
preferredTopology = *preferenceSpec.CPU.PreferredCPUTopology
}
return preferredTopology
}
const defaultSpreadRatio uint32 = 2
func GetSpreadOptions(preferenceSpec *v1beta1.VirtualMachinePreferenceSpec) (uint32, v1beta1.SpreadAcross) {
ratio := defaultSpreadRatio
if preferenceSpec.PreferSpreadSocketToCoreRatio != 0 {
ratio = preferenceSpec.PreferSpreadSocketToCoreRatio
}
across := v1beta1.SpreadAcrossSocketsCores
if preferenceSpec.CPU != nil && preferenceSpec.CPU.SpreadOptions != nil {
if preferenceSpec.CPU.SpreadOptions.Across != nil {
across = *preferenceSpec.CPU.SpreadOptions.Across
}
if preferenceSpec.CPU.SpreadOptions.Ratio != nil {
ratio = *preferenceSpec.CPU.SpreadOptions.Ratio
}
}
return ratio, across
}
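// A worked example of the helpers above (values are illustrative): with no
// SpreadOptions set, GetSpreadOptions falls back to
// PreferSpreadSocketToCoreRatio and finally to the default ratio of 2.
// Spreading 8 guest vCPUs at ratio 2 across sockets and cores then yields
// 4 sockets of 2 cores each:
//
//	ratio, across := GetSpreadOptions(preferenceSpec) // 2, SpreadAcrossSocketsCores
//	sockets := guestCPUs / ratio                      // 8 / 2 = 4
//	coresPerSocket := ratio                           // 2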
//nolint:gocyclo
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/pointer"
)
type field struct {
preference *bool
vmi **bool
}
func ApplyAutoAttachPreferences(preferenceSpec *v1beta1.VirtualMachinePreferenceSpec, vmiSpec *virtv1.VirtualMachineInstanceSpec) {
if preferenceSpec.Devices == nil {
return
}
autoAttachFields := []field{
{preferenceSpec.Devices.PreferredAutoattachGraphicsDevice, &vmiSpec.Domain.Devices.AutoattachGraphicsDevice},
{preferenceSpec.Devices.PreferredAutoattachMemBalloon, &vmiSpec.Domain.Devices.AutoattachMemBalloon},
{preferenceSpec.Devices.PreferredAutoattachPodInterface, &vmiSpec.Domain.Devices.AutoattachPodInterface},
{preferenceSpec.Devices.PreferredAutoattachSerialConsole, &vmiSpec.Domain.Devices.AutoattachSerialConsole},
{preferenceSpec.Devices.PreferredAutoattachInputDevice, &vmiSpec.Domain.Devices.AutoattachInputDevice},
}
for _, field := range autoAttachFields {
if field.preference != nil && *field.vmi == nil {
*field.vmi = pointer.P(*field.preference)
}
}
}
func ApplyDevicePreferences(preferenceSpec *v1beta1.VirtualMachinePreferenceSpec, vmiSpec *virtv1.VirtualMachineInstanceSpec) {
if preferenceSpec.Devices == nil {
return
}
// We only want to apply a preference bool when...
//
// 1. A preference has actually been provided
// 2. The user hasn't defined the corresponding attribute already within the VMI
//
if preferenceSpec.Devices.PreferredUseVirtioTransitional != nil && vmiSpec.Domain.Devices.UseVirtioTransitional == nil {
vmiSpec.Domain.Devices.UseVirtioTransitional = pointer.P(*preferenceSpec.Devices.PreferredUseVirtioTransitional)
}
if preferenceSpec.Devices.PreferredBlockMultiQueue != nil && vmiSpec.Domain.Devices.BlockMultiQueue == nil {
vmiSpec.Domain.Devices.BlockMultiQueue = pointer.P(*preferenceSpec.Devices.PreferredBlockMultiQueue)
}
if preferenceSpec.Devices.PreferredNetworkInterfaceMultiQueue != nil && vmiSpec.Domain.Devices.NetworkInterfaceMultiQueue == nil {
vmiSpec.Domain.Devices.NetworkInterfaceMultiQueue = pointer.P(*preferenceSpec.Devices.PreferredNetworkInterfaceMultiQueue)
}
// FIXME DisableHotplug isn't a pointer bool, so we have no way to tell if a user has actually set it; for now, override it whenever a preference is provided.
if preferenceSpec.Devices.PreferredDisableHotplug != nil {
vmiSpec.Domain.Devices.DisableHotplug = *preferenceSpec.Devices.PreferredDisableHotplug
}
if preferenceSpec.Devices.PreferredSoundModel != "" && vmiSpec.Domain.Devices.Sound != nil && vmiSpec.Domain.Devices.Sound.Model == "" {
vmiSpec.Domain.Devices.Sound.Model = preferenceSpec.Devices.PreferredSoundModel
}
if preferenceSpec.Devices.PreferredRng != nil && vmiSpec.Domain.Devices.Rng == nil {
vmiSpec.Domain.Devices.Rng = preferenceSpec.Devices.PreferredRng.DeepCopy()
}
if preferenceSpec.Devices.PreferredTPM != nil && vmiSpec.Domain.Devices.TPM == nil {
vmiSpec.Domain.Devices.TPM = preferenceSpec.Devices.PreferredTPM.DeepCopy()
}
ApplyAutoAttachPreferences(preferenceSpec, vmiSpec)
applyDiskPreferences(preferenceSpec, vmiSpec)
applyInterfacePreferences(preferenceSpec, vmiSpec)
applyInputPreferences(preferenceSpec, vmiSpec)
applyPanicDevicePreferences(preferenceSpec, vmiSpec)
}
func applyInputPreferences(preferenceSpec *v1beta1.VirtualMachinePreferenceSpec, vmiSpec *virtv1.VirtualMachineInstanceSpec) {
for inputIndex := range vmiSpec.Domain.Devices.Inputs {
vmiInput := &vmiSpec.Domain.Devices.Inputs[inputIndex]
if preferenceSpec.Devices.PreferredInputBus != "" && vmiInput.Bus == "" {
vmiInput.Bus = preferenceSpec.Devices.PreferredInputBus
}
if preferenceSpec.Devices.PreferredInputType != "" && vmiInput.Type == "" {
vmiInput.Type = preferenceSpec.Devices.PreferredInputType
}
}
}
func applyPanicDevicePreferences(preferenceSpec *v1beta1.VirtualMachinePreferenceSpec, vmiSpec *virtv1.VirtualMachineInstanceSpec) {
if preferenceSpec.Devices.PreferredPanicDeviceModel == nil {
return
}
// Only apply the preferred panic device model when the user has not already provided one
for idx := range vmiSpec.Domain.Devices.PanicDevices {
panicDevice := &vmiSpec.Domain.Devices.PanicDevices[idx]
if panicDevice.Model != nil {
continue
}
panicDevice.Model = preferenceSpec.Devices.PreferredPanicDeviceModel
}
}
//nolint:gocyclo
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/pointer"
)
func applyDiskPreferences(preferenceSpec *v1beta1.VirtualMachinePreferenceSpec, vmiSpec *virtv1.VirtualMachineInstanceSpec) {
for diskIndex := range vmiSpec.Domain.Devices.Disks {
vmiDisk := &vmiSpec.Domain.Devices.Disks[diskIndex]
// If no target device is defined, default to a DiskTarget so we can apply preferences
if vmiDisk.DiskDevice.Disk == nil && vmiDisk.DiskDevice.CDRom == nil && vmiDisk.DiskDevice.LUN == nil {
vmiDisk.DiskDevice.Disk = &virtv1.DiskTarget{}
}
if vmiDisk.DiskDevice.Disk != nil {
if preferenceSpec.Devices.PreferredDiskBus != "" && vmiDisk.DiskDevice.Disk.Bus == "" {
vmiDisk.DiskDevice.Disk.Bus = preferenceSpec.Devices.PreferredDiskBus
}
if preferenceSpec.Devices.PreferredDiskBlockSize != nil && vmiDisk.BlockSize == nil {
vmiDisk.BlockSize = preferenceSpec.Devices.PreferredDiskBlockSize.DeepCopy()
}
if preferenceSpec.Devices.PreferredDiskCache != "" && vmiDisk.Cache == "" {
vmiDisk.Cache = preferenceSpec.Devices.PreferredDiskCache
}
if preferenceSpec.Devices.PreferredDiskIO != "" && vmiDisk.IO == "" {
vmiDisk.IO = preferenceSpec.Devices.PreferredDiskIO
}
if preferenceSpec.Devices.PreferredDiskDedicatedIoThread != nil &&
vmiDisk.DedicatedIOThread == nil &&
vmiDisk.DiskDevice.Disk.Bus == virtv1.DiskBusVirtio {
vmiDisk.DedicatedIOThread = pointer.P(*preferenceSpec.Devices.PreferredDiskDedicatedIoThread)
}
} else if vmiDisk.DiskDevice.CDRom != nil {
if preferenceSpec.Devices.PreferredCdromBus != "" && vmiDisk.DiskDevice.CDRom.Bus == "" {
vmiDisk.DiskDevice.CDRom.Bus = preferenceSpec.Devices.PreferredCdromBus
}
} else if vmiDisk.DiskDevice.LUN != nil {
if preferenceSpec.Devices.PreferredLunBus != "" && vmiDisk.DiskDevice.LUN.Bus == "" {
vmiDisk.DiskDevice.LUN.Bus = preferenceSpec.Devices.PreferredLunBus
}
}
}
}
//nolint:gocyclo
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
)
func applyFeaturePreferences(preferenceSpec *v1beta1.VirtualMachinePreferenceSpec, vmiSpec *virtv1.VirtualMachineInstanceSpec) {
if preferenceSpec.Features == nil {
return
}
if vmiSpec.Domain.Features == nil {
vmiSpec.Domain.Features = &virtv1.Features{}
}
// FIXME vmiSpec.Domain.Features.ACPI isn't a FeatureState pointer, so for now just overwrite it whenever a preference is provided.
if preferenceSpec.Features.PreferredAcpi != nil {
vmiSpec.Domain.Features.ACPI = *preferenceSpec.Features.PreferredAcpi.DeepCopy()
}
if preferenceSpec.Features.PreferredApic != nil && vmiSpec.Domain.Features.APIC == nil {
vmiSpec.Domain.Features.APIC = preferenceSpec.Features.PreferredApic.DeepCopy()
}
if preferenceSpec.Features.PreferredHyperv != nil {
applyHyperVFeaturePreferences(preferenceSpec, vmiSpec)
}
if preferenceSpec.Features.PreferredKvm != nil && vmiSpec.Domain.Features.KVM == nil {
vmiSpec.Domain.Features.KVM = preferenceSpec.Features.PreferredKvm.DeepCopy()
}
if preferenceSpec.Features.PreferredPvspinlock != nil && vmiSpec.Domain.Features.Pvspinlock == nil {
vmiSpec.Domain.Features.Pvspinlock = preferenceSpec.Features.PreferredPvspinlock.DeepCopy()
}
if preferenceSpec.Features.PreferredSmm != nil && vmiSpec.Domain.Features.SMM == nil {
vmiSpec.Domain.Features.SMM = preferenceSpec.Features.PreferredSmm.DeepCopy()
}
}
func applyHyperVFeaturePreferences(preferenceSpec *v1beta1.VirtualMachinePreferenceSpec, vmiSpec *virtv1.VirtualMachineInstanceSpec) {
if vmiSpec.Domain.Features.Hyperv == nil {
vmiSpec.Domain.Features.Hyperv = &virtv1.FeatureHyperv{}
}
// TODO clean this up with reflection?
if preferenceSpec.Features.PreferredHyperv.EVMCS != nil && vmiSpec.Domain.Features.Hyperv.EVMCS == nil {
vmiSpec.Domain.Features.Hyperv.EVMCS = preferenceSpec.Features.PreferredHyperv.EVMCS.DeepCopy()
}
if preferenceSpec.Features.PreferredHyperv.Frequencies != nil && vmiSpec.Domain.Features.Hyperv.Frequencies == nil {
vmiSpec.Domain.Features.Hyperv.Frequencies = preferenceSpec.Features.PreferredHyperv.Frequencies.DeepCopy()
}
if preferenceSpec.Features.PreferredHyperv.IPI != nil && vmiSpec.Domain.Features.Hyperv.IPI == nil {
vmiSpec.Domain.Features.Hyperv.IPI = preferenceSpec.Features.PreferredHyperv.IPI.DeepCopy()
}
if preferenceSpec.Features.PreferredHyperv.Reenlightenment != nil && vmiSpec.Domain.Features.Hyperv.Reenlightenment == nil {
vmiSpec.Domain.Features.Hyperv.Reenlightenment = preferenceSpec.Features.PreferredHyperv.Reenlightenment.DeepCopy()
}
if preferenceSpec.Features.PreferredHyperv.Relaxed != nil && vmiSpec.Domain.Features.Hyperv.Relaxed == nil {
vmiSpec.Domain.Features.Hyperv.Relaxed = preferenceSpec.Features.PreferredHyperv.Relaxed.DeepCopy()
}
if preferenceSpec.Features.PreferredHyperv.Reset != nil && vmiSpec.Domain.Features.Hyperv.Reset == nil {
vmiSpec.Domain.Features.Hyperv.Reset = preferenceSpec.Features.PreferredHyperv.Reset.DeepCopy()
}
if preferenceSpec.Features.PreferredHyperv.Runtime != nil && vmiSpec.Domain.Features.Hyperv.Runtime == nil {
vmiSpec.Domain.Features.Hyperv.Runtime = preferenceSpec.Features.PreferredHyperv.Runtime.DeepCopy()
}
if preferenceSpec.Features.PreferredHyperv.Spinlocks != nil && vmiSpec.Domain.Features.Hyperv.Spinlocks == nil {
vmiSpec.Domain.Features.Hyperv.Spinlocks = preferenceSpec.Features.PreferredHyperv.Spinlocks.DeepCopy()
}
if preferenceSpec.Features.PreferredHyperv.SyNIC != nil && vmiSpec.Domain.Features.Hyperv.SyNIC == nil {
vmiSpec.Domain.Features.Hyperv.SyNIC = preferenceSpec.Features.PreferredHyperv.SyNIC.DeepCopy()
}
if preferenceSpec.Features.PreferredHyperv.SyNICTimer != nil && vmiSpec.Domain.Features.Hyperv.SyNICTimer == nil {
vmiSpec.Domain.Features.Hyperv.SyNICTimer = preferenceSpec.Features.PreferredHyperv.SyNICTimer.DeepCopy()
}
if preferenceSpec.Features.PreferredHyperv.TLBFlush != nil && vmiSpec.Domain.Features.Hyperv.TLBFlush == nil {
vmiSpec.Domain.Features.Hyperv.TLBFlush = preferenceSpec.Features.PreferredHyperv.TLBFlush.DeepCopy()
}
if preferenceSpec.Features.PreferredHyperv.VAPIC != nil && vmiSpec.Domain.Features.Hyperv.VAPIC == nil {
vmiSpec.Domain.Features.Hyperv.VAPIC = preferenceSpec.Features.PreferredHyperv.VAPIC.DeepCopy()
}
if preferenceSpec.Features.PreferredHyperv.VPIndex != nil && vmiSpec.Domain.Features.Hyperv.VPIndex == nil {
vmiSpec.Domain.Features.Hyperv.VPIndex = preferenceSpec.Features.PreferredHyperv.VPIndex.DeepCopy()
}
if preferenceSpec.Features.PreferredHyperv.VendorID != nil && vmiSpec.Domain.Features.Hyperv.VendorID == nil {
vmiSpec.Domain.Features.Hyperv.VendorID = preferenceSpec.Features.PreferredHyperv.VendorID.DeepCopy()
}
}
//nolint:gocyclo
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/pointer"
)
func applyFirmwarePreferences(preferenceSpec *v1beta1.VirtualMachinePreferenceSpec, vmiSpec *virtv1.VirtualMachineInstanceSpec) {
if preferenceSpec.Firmware == nil {
return
}
firmware := preferenceSpec.Firmware
if vmiSpec.Domain.Firmware == nil {
vmiSpec.Domain.Firmware = &virtv1.Firmware{}
}
vmiFirmware := vmiSpec.Domain.Firmware
if vmiFirmware.Bootloader == nil {
vmiFirmware.Bootloader = &virtv1.Bootloader{}
}
if firmware.PreferredUseBios != nil &&
*firmware.PreferredUseBios &&
vmiFirmware.Bootloader.BIOS == nil &&
vmiFirmware.Bootloader.EFI == nil {
vmiFirmware.Bootloader.BIOS = &virtv1.BIOS{}
}
if firmware.PreferredUseBiosSerial != nil && vmiFirmware.Bootloader.BIOS != nil && vmiFirmware.Bootloader.BIOS.UseSerial == nil {
vmiFirmware.Bootloader.BIOS.UseSerial = pointer.P(*firmware.PreferredUseBiosSerial)
}
if vmiFirmware.Bootloader.EFI == nil && vmiFirmware.Bootloader.BIOS == nil && firmware.PreferredEfi != nil {
vmiFirmware.Bootloader.EFI = firmware.PreferredEfi.DeepCopy()
// When using PreferredEfi, return early to avoid applying DeprecatedPreferredUseEfi or DeprecatedPreferredUseSecureBoot below
return
}
if firmware.DeprecatedPreferredUseEfi != nil &&
*firmware.DeprecatedPreferredUseEfi &&
vmiFirmware.Bootloader.EFI == nil &&
vmiFirmware.Bootloader.BIOS == nil {
vmiFirmware.Bootloader.EFI = &virtv1.EFI{}
}
if firmware.DeprecatedPreferredUseSecureBoot != nil && vmiFirmware.Bootloader.EFI != nil && vmiFirmware.Bootloader.EFI.SecureBoot == nil {
vmiFirmware.Bootloader.EFI.SecureBoot = pointer.P(*firmware.DeprecatedPreferredUseSecureBoot)
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
"reflect"
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
)
func isInterfaceBindingUnset(iface *virtv1.Interface) bool {
return reflect.ValueOf(iface.InterfaceBindingMethod).IsZero() && iface.Binding == nil
}
func isInterfaceOnPodNetwork(interfaceName string, vmiSpec *virtv1.VirtualMachineInstanceSpec) bool {
for _, network := range vmiSpec.Networks {
if network.Name == interfaceName {
return network.Pod != nil
}
}
return false
}
func applyInterfacePreferences(preferenceSpec *v1beta1.VirtualMachinePreferenceSpec, vmiSpec *virtv1.VirtualMachineInstanceSpec) {
for ifaceIndex := range vmiSpec.Domain.Devices.Interfaces {
vmiIface := &vmiSpec.Domain.Devices.Interfaces[ifaceIndex]
if preferenceSpec.Devices.PreferredInterfaceModel != "" && vmiIface.Model == "" {
vmiIface.Model = preferenceSpec.Devices.PreferredInterfaceModel
}
if preferenceSpec.Devices.PreferredInterfaceMasquerade != nil &&
isInterfaceBindingUnset(vmiIface) &&
isInterfaceOnPodNetwork(vmiIface.Name, vmiSpec) {
vmiIface.Masquerade = preferenceSpec.Devices.PreferredInterfaceMasquerade.DeepCopy()
}
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
)
func applyMachinePreferences(preferenceSpec *v1beta1.VirtualMachinePreferenceSpec, vmiSpec *virtv1.VirtualMachineInstanceSpec) {
if preferenceSpec.Machine == nil {
return
}
if preferenceSpec.Machine.PreferredMachineType != "" {
if vmiSpec.Domain.Machine == nil {
vmiSpec.Domain.Machine = &virtv1.Machine{}
}
if vmiSpec.Domain.Machine.Type == "" {
vmiSpec.Domain.Machine.Type = preferenceSpec.Machine.PreferredMachineType
}
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
)
func applySubdomain(preferenceSpec *v1beta1.VirtualMachinePreferenceSpec, vmiSpec *virtv1.VirtualMachineInstanceSpec) {
if vmiSpec.Subdomain == "" && preferenceSpec.PreferredSubdomain != nil {
vmiSpec.Subdomain = *preferenceSpec.PreferredSubdomain
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/pointer"
)
func applyTerminationGracePeriodSeconds(preferenceSpec *v1beta1.VirtualMachinePreferenceSpec, vmiSpec *virtv1.VirtualMachineInstanceSpec) {
if preferenceSpec.PreferredTerminationGracePeriodSeconds != nil && vmiSpec.TerminationGracePeriodSeconds == nil {
vmiSpec.TerminationGracePeriodSeconds = pointer.P(*preferenceSpec.PreferredTerminationGracePeriodSeconds)
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
)
type vmiApplier struct{}
func New() *vmiApplier {
return &vmiApplier{}
}
func (a *vmiApplier) Apply(
preferenceSpec *v1beta1.VirtualMachinePreferenceSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
vmiMetadata *metav1.ObjectMeta,
) {
if preferenceSpec == nil {
return
}
applyCPUPreferences(preferenceSpec, vmiSpec)
ApplyDevicePreferences(preferenceSpec, vmiSpec)
applyFeaturePreferences(preferenceSpec, vmiSpec)
applyFirmwarePreferences(preferenceSpec, vmiSpec)
applyMachinePreferences(preferenceSpec, vmiSpec)
applyClockPreferences(preferenceSpec, vmiSpec)
applySubdomain(preferenceSpec, vmiSpec)
applyTerminationGracePeriodSeconds(preferenceSpec, vmiSpec)
ApplyArchitecturePreferences(preferenceSpec, vmiSpec)
applyPreferenceAnnotations(preferenceSpec.Annotations, vmiMetadata)
}
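// A minimal caller-side sketch (the spec and metadata are assumed to come from
// the VM template being rendered):
//
//	New().Apply(preferenceSpec, &vmi.Spec, &vmi.ObjectMeta)
//
// Each apply* helper only fills in fields the user has left unset, so the call
// is safe on a partially specified VMI spec.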
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package find
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/client-go/kubecli"
)
type clusterPreferenceFinder struct {
store cache.Store
virtClient kubecli.KubevirtClient
}
func NewClusterPreferenceFinder(store cache.Store, virtClient kubecli.KubevirtClient) *clusterPreferenceFinder {
return &clusterPreferenceFinder{
store: store,
virtClient: virtClient,
}
}
func (f *clusterPreferenceFinder) FindPreference(vm *virtv1.VirtualMachine) (*v1beta1.VirtualMachineClusterPreference, error) {
if vm.Spec.Preference == nil {
return nil, nil
}
if f.store == nil {
return f.virtClient.VirtualMachineClusterPreference().Get(
context.Background(), vm.Spec.Preference.Name, metav1.GetOptions{})
}
obj, exists, err := f.store.GetByKey(vm.Spec.Preference.Name)
if err != nil {
return nil, err
}
if !exists {
return f.virtClient.VirtualMachineClusterPreference().Get(
context.Background(), vm.Spec.Preference.Name, metav1.GetOptions{})
}
preference, ok := obj.(*v1beta1.VirtualMachineClusterPreference)
if !ok {
return nil, fmt.Errorf("unknown object type found in VirtualMachineClusterPreference informer")
}
return preference, nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package find
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/client-go/kubecli"
)
type preferenceFinder struct {
store cache.Store
virtClient kubecli.KubevirtClient
}
func NewPreferenceFinder(store cache.Store, virtClient kubecli.KubevirtClient) *preferenceFinder {
return &preferenceFinder{
store: store,
virtClient: virtClient,
}
}
func (f *preferenceFinder) FindPreference(vm *virtv1.VirtualMachine) (*v1beta1.VirtualMachinePreference, error) {
if vm.Spec.Preference == nil {
return nil, nil
}
namespacedName := types.NamespacedName{
Namespace: vm.Namespace,
Name: vm.Spec.Preference.Name,
}
if f.store == nil {
return f.virtClient.VirtualMachinePreference(namespacedName.Namespace).Get(
context.Background(), namespacedName.Name, metav1.GetOptions{})
}
obj, exists, err := f.store.GetByKey(namespacedName.String())
if err != nil {
return nil, err
}
if !exists {
return f.virtClient.VirtualMachinePreference(namespacedName.Namespace).Get(
context.Background(), namespacedName.Name, metav1.GetOptions{})
}
preference, ok := obj.(*v1beta1.VirtualMachinePreference)
if !ok {
return nil, fmt.Errorf("unknown object type found in VirtualMachinePreference informer")
}
return preference, nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package find
import (
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
virtv1 "kubevirt.io/api/core/v1"
instancetypeapi "kubevirt.io/api/instancetype"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/kubevirt/pkg/instancetype/find"
)
type controllerRevisionFinder interface {
Find(types.NamespacedName) (*appsv1.ControllerRevision, error)
}
type revisionFinder struct {
controllerRevisionFinder controllerRevisionFinder
}
func NewRevisionFinder(store cache.Store, virtClient kubecli.KubevirtClient) *revisionFinder {
return &revisionFinder{
controllerRevisionFinder: find.NewControllerRevisionFinder(store, virtClient),
}
}
func (f *revisionFinder) FindPreference(vm *virtv1.VirtualMachine) (*appsv1.ControllerRevision, error) {
// Avoid a race with Store() here by preferring RevisionName, when already provided, over whatever is in ControllerRevisionRef
if vm.Spec.Preference != nil && vm.Spec.Preference.RevisionName != "" {
return f.controllerRevisionFinder.Find(types.NamespacedName{
Namespace: vm.Namespace,
Name: vm.Spec.Preference.RevisionName,
})
}
ref := vm.Status.PreferenceRef
if ref != nil && ref.ControllerRevisionRef != nil && ref.ControllerRevisionRef.Name != "" {
cr, err := f.controllerRevisionFinder.Find(types.NamespacedName{
Namespace: vm.Namespace,
Name: ref.ControllerRevisionRef.Name,
})
if err != nil {
return nil, err
}
// Only return the found CR if it is for the referenced preference
if label, ok := cr.Labels[instancetypeapi.ControllerRevisionObjectNameLabel]; ok && label == vm.Spec.Preference.Name {
return cr, nil
}
}
return nil, nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package find
import (
"fmt"
"strings"
"k8s.io/client-go/tools/cache"
virtv1 "kubevirt.io/api/core/v1"
api "kubevirt.io/api/instancetype"
"kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/kubevirt/pkg/instancetype/compatibility"
)
type specFinder struct {
preferenceFinder *preferenceFinder
clusterPreferenceFinder *clusterPreferenceFinder
revisionFinder *revisionFinder
}
func NewSpecFinder(store, clusterStore, revisionStore cache.Store, virtClient kubecli.KubevirtClient) *specFinder {
return &specFinder{
preferenceFinder: NewPreferenceFinder(store, virtClient),
clusterPreferenceFinder: NewClusterPreferenceFinder(clusterStore, virtClient),
revisionFinder: NewRevisionFinder(revisionStore, virtClient),
}
}
const unexpectedKindFmt = "got unexpected kind in PreferenceMatcher: %s"
func (f *specFinder) FindPreference(vm *virtv1.VirtualMachine) (*v1beta1.VirtualMachinePreferenceSpec, error) {
if vm.Spec.Preference == nil {
return nil, nil
}
revision, err := f.revisionFinder.FindPreference(vm)
if err != nil {
return nil, err
}
if revision != nil {
return compatibility.GetPreferenceSpec(revision)
}
switch strings.ToLower(vm.Spec.Preference.Kind) {
case api.SingularPreferenceResourceName, api.PluralPreferenceResourceName:
preference, err := f.preferenceFinder.FindPreference(vm)
if err != nil {
return nil, err
}
return &preference.Spec, nil
case api.ClusterSingularPreferenceResourceName, api.ClusterPluralPreferenceResourceName, "":
clusterPreference, err := f.clusterPreferenceFinder.FindPreference(vm)
if err != nil {
return nil, err
}
return &clusterPreference.Spec, nil
default:
return nil, fmt.Errorf(unexpectedKindFmt, vm.Spec.Preference.Kind)
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package requirements
import (
"fmt"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
)
const (
requiredArchitectureNotUsedErrFmt = "preference requires architecture %s but %s is being requested"
)
func checkArch(preferenceSpec *v1beta1.VirtualMachinePreferenceSpec, vmiSpec *v1.VirtualMachineInstanceSpec) (conflict.Conflicts, error) {
if vmiSpec.Architecture != *preferenceSpec.Requirements.Architecture {
return conflict.Conflicts{conflict.New("spec", "template", "spec", "architecture")},
fmt.Errorf(requiredArchitectureNotUsedErrFmt, *preferenceSpec.Requirements.Architecture, vmiSpec.Architecture)
}
return nil, nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package requirements
import (
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
)
type checker struct{}
func New() *checker {
return &checker{}
}
func (c *checker) Check(
instancetypeSpec *v1beta1.VirtualMachineInstancetypeSpec,
preferenceSpec *v1beta1.VirtualMachinePreferenceSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
) (conflict.Conflicts, error) {
if preferenceSpec == nil || preferenceSpec.Requirements == nil {
return nil, nil
}
if preferenceSpec.Requirements.CPU != nil {
if conflicts, err := checkCPU(instancetypeSpec, preferenceSpec, vmiSpec); err != nil {
return conflicts, err
}
}
if preferenceSpec.Requirements.Memory != nil {
if conflicts, err := checkMemory(instancetypeSpec, preferenceSpec, vmiSpec); err != nil {
return conflicts, err
}
}
if preferenceSpec.Requirements.Architecture != nil {
if conflicts, err := checkArch(preferenceSpec, vmiSpec); err != nil {
return conflicts, err
}
}
return nil, nil
}
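// A minimal caller-side sketch (inputs are assumed to be resolved elsewhere):
//
//	conflicts, err := New().Check(instancetypeSpec, preferenceSpec, &vmi.Spec)
//	if err != nil {
//		// conflicts names the offending spec paths, e.g.
//		// spec.template.spec.domain.cpu.sockets
//	}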
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package requirements
import (
"fmt"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
preferenceApply "kubevirt.io/kubevirt/pkg/instancetype/preference/apply"
)
const (
InsufficientInstanceTypeCPUResourcesErrorFmt = "insufficient CPU resources of %d vCPU provided by instance type, preference requires " +
"%d vCPU"
InsufficientVMCPUResourcesErrorFmt = "insufficient CPU resources of %d vCPU provided by VirtualMachine, preference " +
"requires %d vCPU provided as %s"
NoVMCPUResourcesDefinedErrorFmt = "no CPU resources provided by VirtualMachine, preference requires %d vCPU"
)
func checkCPU(
instancetypeSpec *v1beta1.VirtualMachineInstancetypeSpec,
preferenceSpec *v1beta1.VirtualMachinePreferenceSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
) (conflict.Conflicts, error) {
if instancetypeSpec != nil {
if instancetypeSpec.CPU.Guest < preferenceSpec.Requirements.CPU.Guest {
return conflict.Conflicts{conflict.New("spec", "instancetype")},
fmt.Errorf(
InsufficientInstanceTypeCPUResourcesErrorFmt, instancetypeSpec.CPU.Guest, preferenceSpec.Requirements.CPU.Guest)
}
return nil, nil
}
if vmiSpec.Domain.CPU == nil {
return conflict.Conflicts{conflict.New("spec", "template", "spec", "domain", "cpu")},
fmt.Errorf(NoVMCPUResourcesDefinedErrorFmt, preferenceSpec.Requirements.CPU.Guest)
}
baseConflict := conflict.New("spec", "template", "spec", "domain", "cpu")
switch preferenceApply.GetPreferredTopology(preferenceSpec) {
case v1beta1.DeprecatedPreferThreads, v1beta1.Threads:
if vmiSpec.Domain.CPU.Threads < preferenceSpec.Requirements.CPU.Guest {
return conflict.Conflicts{baseConflict.NewChild("threads")},
fmt.Errorf(
InsufficientVMCPUResourcesErrorFmt, vmiSpec.Domain.CPU.Threads, preferenceSpec.Requirements.CPU.Guest, "threads")
}
case v1beta1.DeprecatedPreferCores, v1beta1.Cores:
if vmiSpec.Domain.CPU.Cores < preferenceSpec.Requirements.CPU.Guest {
return conflict.Conflicts{baseConflict.NewChild("cores")},
fmt.Errorf(
InsufficientVMCPUResourcesErrorFmt, vmiSpec.Domain.CPU.Cores, preferenceSpec.Requirements.CPU.Guest, "cores")
}
case v1beta1.DeprecatedPreferSockets, v1beta1.Sockets:
if vmiSpec.Domain.CPU.Sockets < preferenceSpec.Requirements.CPU.Guest {
return conflict.Conflicts{baseConflict.NewChild("sockets")},
fmt.Errorf(
InsufficientVMCPUResourcesErrorFmt, vmiSpec.Domain.CPU.Sockets, preferenceSpec.Requirements.CPU.Guest, "sockets")
}
case v1beta1.DeprecatedPreferSpread, v1beta1.Spread:
return checkSpread(preferenceSpec, vmiSpec)
case v1beta1.DeprecatedPreferAny, v1beta1.Any:
cpuResources := vmiSpec.Domain.CPU.Cores * vmiSpec.Domain.CPU.Sockets * vmiSpec.Domain.CPU.Threads
if cpuResources < preferenceSpec.Requirements.CPU.Guest {
return conflict.Conflicts{
baseConflict.NewChild("cores"),
baseConflict.NewChild("sockets"),
baseConflict.NewChild("threads"),
},
fmt.Errorf(InsufficientVMCPUResourcesErrorFmt,
cpuResources, preferenceSpec.Requirements.CPU.Guest, "cores, sockets and threads")
}
}
return nil, nil
}
func checkSpread(
preferenceSpec *v1beta1.VirtualMachinePreferenceSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
) (conflict.Conflicts, error) {
var (
vCPUs uint32
conflicts conflict.Conflicts
)
baseConflict := conflict.New("spec", "template", "spec", "domain", "cpu")
_, across := preferenceApply.GetSpreadOptions(preferenceSpec)
switch across {
case v1beta1.SpreadAcrossSocketsCores:
vCPUs = vmiSpec.Domain.CPU.Sockets * vmiSpec.Domain.CPU.Cores
conflicts = conflict.Conflicts{
baseConflict.NewChild("sockets"),
baseConflict.NewChild("cores"),
}
case v1beta1.SpreadAcrossCoresThreads:
vCPUs = vmiSpec.Domain.CPU.Cores * vmiSpec.Domain.CPU.Threads
conflicts = conflict.Conflicts{
baseConflict.NewChild("cores"),
baseConflict.NewChild("threads"),
}
case v1beta1.SpreadAcrossSocketsCoresThreads:
vCPUs = vmiSpec.Domain.CPU.Sockets * vmiSpec.Domain.CPU.Cores * vmiSpec.Domain.CPU.Threads
conflicts = conflict.Conflicts{
baseConflict.NewChild("sockets"),
baseConflict.NewChild("cores"),
baseConflict.NewChild("threads"),
}
}
if vCPUs < preferenceSpec.Requirements.CPU.Guest {
return conflicts, fmt.Errorf(InsufficientVMCPUResourcesErrorFmt, vCPUs, preferenceSpec.Requirements.CPU.Guest, across)
}
return nil, nil
}
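// A worked example (illustrative): when spreading across sockets and cores,
// sockets=2 and cores=4 provide vCPUs = 2*4 = 8, satisfying a preference that
// requires 8 guest vCPUs; sockets=2 and cores=2 only provide 4 and would
// return conflicts on both the sockets and cores paths.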
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package requirements
import (
"fmt"
"k8s.io/apimachinery/pkg/api/resource"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
"kubevirt.io/kubevirt/pkg/pointer"
)
const (
InsufficientInstanceTypeMemoryResourcesErrorFmt = "insufficient Memory resources of %s provided by instance type, preference requires %s"
InsufficientVMMemoryResourcesErrorFmt = "insufficient Memory resources of %s provided by VirtualMachine, preference requires %s"
)
func checkMemory(
instancetypeSpec *v1beta1.VirtualMachineInstancetypeSpec,
preferenceSpec *v1beta1.VirtualMachinePreferenceSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
) (conflict.Conflicts, error) {
errFmt := InsufficientVMMemoryResourcesErrorFmt
errConflict := conflict.New("spec", "template", "spec", "domain", "memory")
providedMemory := pointer.P(resource.MustParse("0Mi"))
if instancetypeSpec != nil {
errConflict = conflict.New("spec", "instancetype")
errFmt = InsufficientInstanceTypeMemoryResourcesErrorFmt
providedMemory = &instancetypeSpec.Memory.Guest
}
if vmiSpec != nil && vmiSpec.Domain.Memory != nil && vmiSpec.Domain.Memory.Guest != nil {
providedMemory = vmiSpec.Domain.Memory.Guest
}
if providedMemory.Cmp(preferenceSpec.Requirements.Memory.Guest) < 0 {
return conflict.Conflicts{errConflict}, fmt.Errorf(errFmt, providedMemory.String(), preferenceSpec.Requirements.Memory.Guest.String())
}
return nil, nil
}
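// A worked example (illustrative): an instance type providing 1Gi against a
// preference requiring 2Gi fails the Cmp check above:
//
//	provided := resource.MustParse("1Gi")
//	required := resource.MustParse("2Gi")
//	provided.Cmp(required) // -1, so a conflict on spec.instancetype is returned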
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package validation
import (
"fmt"
"slices"
"kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
"kubevirt.io/kubevirt/pkg/instancetype/preference/apply"
)
func IsPreferredTopologySupported(topology v1beta1.PreferredCPUTopology) bool {
supportedTopologies := []v1beta1.PreferredCPUTopology{
v1beta1.DeprecatedPreferSockets,
v1beta1.DeprecatedPreferCores,
v1beta1.DeprecatedPreferThreads,
v1beta1.DeprecatedPreferSpread,
v1beta1.DeprecatedPreferAny,
v1beta1.Sockets,
v1beta1.Cores,
v1beta1.Threads,
v1beta1.Spread,
v1beta1.Any,
}
return slices.Contains(supportedTopologies, topology)
}
const (
instancetypeCPUGuestPath = "instancetype.spec.cpu.guest"
spreadAcrossSocketsCoresErrFmt = "%d vCPUs provided by the instance type are not divisible by the " +
"Spec.PreferSpreadSocketToCoreRatio or Spec.CPU.PreferSpreadOptions.Ratio of %d provided by the preference"
spreadAcrossCoresThreadsErrFmt = "%d vCPUs provided by the instance type are not divisible by the number of threads per core %d"
spreadAcrossSocketsCoresThreadsErrFmt = "%d vCPUs provided by the instance type are not divisible by the number of threads per core " +
"%d and Spec.PreferSpreadSocketToCoreRatio or Spec.CPU.PreferSpreadOptions.Ratio of %d"
)
func CheckSpreadCPUTopology(
instancetypeSpec *v1beta1.VirtualMachineInstancetypeSpec,
preferenceSpec *v1beta1.VirtualMachinePreferenceSpec,
) *conflict.Conflict {
topology := apply.GetPreferredTopology(preferenceSpec)
if instancetypeSpec == nil || (topology != v1beta1.Spread && topology != v1beta1.DeprecatedPreferSpread) {
return nil
}
ratio, across := apply.GetSpreadOptions(preferenceSpec)
switch across {
case v1beta1.SpreadAcrossSocketsCores:
if (instancetypeSpec.CPU.Guest % ratio) > 0 {
return conflict.NewWithMessage(
fmt.Sprintf(spreadAcrossSocketsCoresErrFmt, instancetypeSpec.CPU.Guest, ratio),
instancetypeCPUGuestPath,
)
}
case v1beta1.SpreadAcrossCoresThreads:
if (instancetypeSpec.CPU.Guest % ratio) > 0 {
return conflict.NewWithMessage(
fmt.Sprintf(spreadAcrossCoresThreadsErrFmt, instancetypeSpec.CPU.Guest, ratio),
instancetypeCPUGuestPath,
)
}
case v1beta1.SpreadAcrossSocketsCoresThreads:
const threadsPerCore = 2
if (instancetypeSpec.CPU.Guest%threadsPerCore) > 0 || ((instancetypeSpec.CPU.Guest/threadsPerCore)%ratio) > 0 {
return conflict.NewWithMessage(
fmt.Sprintf(spreadAcrossSocketsCoresThreadsErrFmt, instancetypeSpec.CPU.Guest, threadsPerCore, ratio),
instancetypeCPUGuestPath,
)
}
}
return nil
}
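// A worked example (illustrative): spreading across sockets and cores with a
// ratio of 2 requires an even vCPU count, so guest=5 yields 5%2 == 1 and a
// conflict on instancetype.spec.cpu.guest, while guest=6 divides cleanly into
// 3 sockets of 2 cores each.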
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package revision
import (
"fmt"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/runtime"
"kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/instancetype/compatibility"
)
func Compare(revisionA, revisionB *appsv1.ControllerRevision) (bool, error) {
if err := compatibility.Decode(revisionA); err != nil {
return false, err
}
if err := compatibility.Decode(revisionB); err != nil {
return false, err
}
revisionASpec, err := getSpec(revisionA.Data.Object)
if err != nil {
return false, err
}
revisionBSpec, err := getSpec(revisionB.Data.Object)
if err != nil {
return false, err
}
return equality.Semantic.DeepEqual(revisionASpec, revisionBSpec), nil
}
func getSpec(obj runtime.Object) (interface{}, error) {
switch o := obj.(type) {
case *v1beta1.VirtualMachineInstancetype:
return &o.Spec, nil
case *v1beta1.VirtualMachineClusterInstancetype:
return &o.Spec, nil
case *v1beta1.VirtualMachinePreference:
return &o.Spec, nil
case *v1beta1.VirtualMachineClusterPreference:
return &o.Spec, nil
default:
return nil, fmt.Errorf("unexpected type: %T", obj)
}
}
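// A minimal usage sketch (illustrative): comparing a freshly generated
// revision against one already stored in the cluster:
//
//	equal, err := Compare(generatedRevision, existingRevision)
//	if err == nil && !equal {
//	    // the stashed spec differs from the expected one
//	}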
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package revision
import (
"k8s.io/client-go/tools/cache"
"kubevirt.io/client-go/kubecli"
)
type revisionHandler struct {
instancetypeStore cache.Store
clusterInstancetypeStore cache.Store
preferenceStore cache.Store
clusterPreferenceStore cache.Store
virtClient kubecli.KubevirtClient
}
func New(
instancetypeStore,
clusterInstancetypeStore,
preferenceStore,
clusterPreferenceStore cache.Store,
virtClient kubecli.KubevirtClient,
) *revisionHandler {
return &revisionHandler{
instancetypeStore: instancetypeStore,
clusterInstancetypeStore: clusterInstancetypeStore,
preferenceStore: preferenceStore,
clusterPreferenceStore: clusterPreferenceStore,
virtClient: virtClient,
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package revision
import (
"context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
)
func (h *revisionHandler) patchVM(
instancetypeStatusRef, preferenceStatusRef *virtv1.InstancetypeStatusRef,
vm *virtv1.VirtualMachine,
) error {
// Batch any writes to the VirtualMachine into a single PatchStatus() call to avoid races in the controller.
logger := func() *log.FilteredLogger { return log.Log.Object(vm) }
revisionPatch, err := GeneratePatch(instancetypeStatusRef, preferenceStatusRef)
if err != nil || len(revisionPatch) == 0 {
return err
}
if _, err := h.virtClient.VirtualMachine(vm.Namespace).PatchStatus(
context.Background(), vm.Name, types.JSONPatchType, revisionPatch, metav1.PatchOptions{},
); err != nil {
logger().Reason(err).Error("Failed to update VirtualMachine with instancetype and preference ControllerRevision references.")
return err
}
return nil
}
func GeneratePatch(instancetypeStatusRef, preferenceStatusRef *virtv1.InstancetypeStatusRef) ([]byte, error) {
patchSet := patch.New()
if instancetypeStatusRef != nil {
patchSet.AddOption(
patch.WithAdd("/status/instancetypeRef", instancetypeStatusRef),
)
}
if preferenceStatusRef != nil {
patchSet.AddOption(
patch.WithAdd("/status/preferenceRef", preferenceStatusRef),
)
}
if patchSet.IsEmpty() {
return nil, nil
}
return patchSet.GeneratePayload()
}
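// For reference (illustrative, with assumed field values), a payload generated
// for an instance type ref alone is a single JSON patch operation roughly of
// the form:
//
//	[{"op":"add","path":"/status/instancetypeRef","value":{"name":"u1.small","kind":"virtualmachineclusterinstancetype"}}]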
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package revision
import virtv1 "kubevirt.io/api/core/v1"
func HasControllerRevisionRef(ref *virtv1.InstancetypeStatusRef) bool {
return ref != nil && ref.ControllerRevisionRef != nil && ref.ControllerRevisionRef.Name != ""
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
//nolint:dupl
package revision
import (
"context"
"fmt"
"strings"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/validation/field"
virtv1 "kubevirt.io/api/core/v1"
api "kubevirt.io/api/instancetype"
"kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/instancetype/apply"
"kubevirt.io/kubevirt/pkg/instancetype/find"
preferenceFind "kubevirt.io/kubevirt/pkg/instancetype/preference/find"
"kubevirt.io/kubevirt/pkg/pointer"
"kubevirt.io/kubevirt/pkg/util"
)
func (h *revisionHandler) Store(vm *virtv1.VirtualMachine) error {
instancetypeStatusRef, err := h.storeInstancetypeRevision(vm)
if err != nil {
log.Log.Object(vm).Reason(err).Error("Failed to store ControllerRevision of VirtualMachineInstancetypeSpec for the Virtualmachine.")
return err
}
preferenceStatusRef, err := h.storePreferenceRevision(vm)
if err != nil {
log.Log.Object(vm).Reason(err).Error("Failed to store ControllerRevision of VirtualMachinePreferenceSpec for the Virtualmachine.")
return err
}
return h.patchVM(instancetypeStatusRef, preferenceStatusRef, vm)
}
func syncStatusWithMatcher(
vm *virtv1.VirtualMachine,
matcher virtv1.Matcher,
statusRef *virtv1.InstancetypeStatusRef,
createRevisionFunc func(vm *virtv1.VirtualMachine) (*appsv1.ControllerRevision, error),
) error {
var clearControllerRevisionRef bool
matcherName := matcher.GetName()
if matcherName != "" && matcherName != statusRef.Name {
statusRef.Name = matcherName
clearControllerRevisionRef = true
}
matcherKind := matcher.GetKind()
if matcherKind != "" && matcherKind != statusRef.Kind {
statusRef.Kind = matcherKind
clearControllerRevisionRef = true
}
matcherInferFromVolume := matcher.GetInferFromVolume()
if matcherInferFromVolume != "" && matcherInferFromVolume != statusRef.InferFromVolume {
statusRef.InferFromVolume = matcherInferFromVolume
clearControllerRevisionRef = true
}
// If the name, kind or inferFromVolume matcher values have changed we need to clear ControllerRevisionRef to either use RevisionName
// from the matcher or to store a copy of the new resource the matcher is pointing at.
if clearControllerRevisionRef {
statusRef.ControllerRevisionRef = nil
}
syncInferFromVolumeFailurePolicy(matcher, statusRef)
matcherRevisionName := matcher.GetRevisionName()
if matcherRevisionName != "" {
if statusRef.ControllerRevisionRef == nil || statusRef.ControllerRevisionRef.Name != matcherRevisionName {
statusRef.ControllerRevisionRef = &virtv1.ControllerRevisionRef{
Name: matcherRevisionName,
}
}
}
if statusRef.ControllerRevisionRef == nil {
storedRevision, err := createRevisionFunc(vm)
if err != nil {
return err
}
statusRef.ControllerRevisionRef = &virtv1.ControllerRevisionRef{
Name: storedRevision.Name,
}
}
return nil
}
func syncInferFromVolumeFailurePolicy(matcher virtv1.Matcher, statusRef *virtv1.InstancetypeStatusRef) {
matcherInferFromVolumeFailurePolicy := matcher.GetInferFromVolumeFailurePolicy()
if matcherInferFromVolumeFailurePolicy != nil {
if statusRef.InferFromVolumeFailurePolicy == nil ||
*matcherInferFromVolumeFailurePolicy != *statusRef.InferFromVolumeFailurePolicy {
statusRef.InferFromVolumeFailurePolicy = pointer.P(*matcherInferFromVolumeFailurePolicy)
}
}
}
func (h *revisionHandler) storeInstancetypeRevision(vm *virtv1.VirtualMachine) (*virtv1.InstancetypeStatusRef, error) {
if vm.Spec.Instancetype == nil {
return nil, nil
}
if vm.Status.InstancetypeRef == nil {
vm.Status.InstancetypeRef = &virtv1.InstancetypeStatusRef{}
}
statusRef := vm.Status.InstancetypeRef.DeepCopy()
if err := syncStatusWithMatcher(vm, vm.Spec.Instancetype, statusRef, h.createInstancetypeRevision); err != nil {
return nil, err
}
if equality.Semantic.DeepEqual(vm.Status.InstancetypeRef, statusRef) {
return nil, nil
}
vm.Status.InstancetypeRef = statusRef
return vm.Status.InstancetypeRef, nil
}
func (h *revisionHandler) createInstancetypeRevision(vm *virtv1.VirtualMachine) (*appsv1.ControllerRevision, error) {
switch strings.ToLower(vm.Spec.Instancetype.Kind) {
case api.SingularResourceName, api.PluralResourceName:
instancetype, err := find.NewInstancetypeFinder(h.instancetypeStore, h.virtClient).Find(vm)
if err != nil {
return nil, err
}
// There is still a window where the instancetype can be updated between the VirtualMachine validation webhook accepting
// the VirtualMachine and the VirtualMachine controller creating a ControllerRevision. As such we need to check one final
// time that there are no conflicts when applying the instancetype to the VirtualMachine before continuing.
if err := h.checkForInstancetypeConflicts(&instancetype.Spec, &vm.Spec.Template.Spec, &vm.Spec.Template.ObjectMeta); err != nil {
return nil, err
}
return h.storeControllerRevision(vm, instancetype)
case api.ClusterSingularResourceName, api.ClusterPluralResourceName, "":
clusterInstancetype, err := find.NewClusterInstancetypeFinder(h.clusterInstancetypeStore, h.virtClient).Find(vm)
if err != nil {
return nil, err
}
// There is still a window where the instancetype can be updated between the VirtualMachine validation webhook accepting
// the VirtualMachine and the VirtualMachine controller creating a ControllerRevision. As such we need to check one final
// time that there are no conflicts when applying the instancetype to the VirtualMachine before continuing.
if err := h.checkForInstancetypeConflicts(
&clusterInstancetype.Spec,
&vm.Spec.Template.Spec,
&vm.Spec.Template.ObjectMeta,
); err != nil {
return nil, err
}
return h.storeControllerRevision(vm, clusterInstancetype)
default:
return nil, fmt.Errorf("got unexpected kind in InstancetypeMatcher: %s", vm.Spec.Instancetype.Kind)
}
}
func (h *revisionHandler) checkForInstancetypeConflicts(
instancetypeSpec *v1beta1.VirtualMachineInstancetypeSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
vmiMetadata *metav1.ObjectMeta,
) error {
// Apply the instancetype to a copy of the VMI spec as we don't want to persist any changes to the VM being passed around
vmiSpecCopy := vmiSpec.DeepCopy()
conflicts := apply.NewVMIApplier().ApplyToVMI(field.NewPath("spec", "template", "spec"), instancetypeSpec, nil, vmiSpecCopy, vmiMetadata)
if len(conflicts) > 0 {
return conflicts
}
return nil
}
func (h *revisionHandler) storePreferenceRevision(vm *virtv1.VirtualMachine) (*virtv1.InstancetypeStatusRef, error) {
if vm.Spec.Preference == nil {
return nil, nil
}
if vm.Status.PreferenceRef == nil {
vm.Status.PreferenceRef = &virtv1.InstancetypeStatusRef{}
}
statusRef := vm.Status.PreferenceRef.DeepCopy()
if err := syncStatusWithMatcher(vm, vm.Spec.Preference, statusRef, h.createPreferenceRevision); err != nil {
return nil, err
}
if equality.Semantic.DeepEqual(vm.Status.PreferenceRef, statusRef) {
return nil, nil
}
vm.Status.PreferenceRef = statusRef
return vm.Status.PreferenceRef, nil
}
func (h *revisionHandler) createPreferenceRevision(vm *virtv1.VirtualMachine) (*appsv1.ControllerRevision, error) {
switch strings.ToLower(vm.Spec.Preference.Kind) {
case api.SingularPreferenceResourceName, api.PluralPreferenceResourceName:
preference, err := preferenceFind.NewPreferenceFinder(h.preferenceStore, h.virtClient).FindPreference(vm)
if err != nil {
return nil, err
}
return h.storeControllerRevision(vm, preference)
case api.ClusterSingularPreferenceResourceName, api.ClusterPluralPreferenceResourceName, "":
clusterPreference, err := preferenceFind.NewClusterPreferenceFinder(h.clusterPreferenceStore, h.virtClient).FindPreference(vm)
if err != nil {
return nil, err
}
return h.storeControllerRevision(vm, clusterPreference)
default:
return nil, fmt.Errorf("got unexpected kind in PreferenceMatcher: %s", vm.Spec.Preference.Kind)
}
}
func GenerateName(vmName, resourceName, resourceVersion string, resourceUID types.UID, resourceGeneration int64) string {
return fmt.Sprintf("%s-%s-%s-%s-%d", vmName, resourceName, resourceVersion, resourceUID, resourceGeneration)
}
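// For example (illustrative): a VirtualMachine "my-vm" referencing generation 1
// of a v1beta1 cluster instance type "u1.small" yields a revision name such as
// "my-vm-u1.small-v1beta1-<uid>-1", where <uid> is the referenced object's UID.
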
func CreateControllerRevision(vm *virtv1.VirtualMachine, object runtime.Object) (*appsv1.ControllerRevision, error) {
obj, err := util.GenerateKubeVirtGroupVersionKind(object)
if err != nil {
return nil, err
}
metaObj, ok := obj.(metav1.Object)
if !ok {
return nil, fmt.Errorf("unexpected object format returned from GenerateKubeVirtGroupVersionKind")
}
revisionName := GenerateName(
vm.Name, metaObj.GetName(),
obj.GetObjectKind().GroupVersionKind().Version,
metaObj.GetUID(),
metaObj.GetGeneration(),
)
// Remove metadata that shouldn't be carried into the stored ControllerRevision
metaObj.SetLabels(nil)
metaObj.SetAnnotations(nil)
metaObj.SetFinalizers(nil)
metaObj.SetOwnerReferences(nil)
metaObj.SetManagedFields(nil)
return &appsv1.ControllerRevision{
ObjectMeta: metav1.ObjectMeta{
Name: revisionName,
Namespace: vm.Namespace,
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(vm, virtv1.VirtualMachineGroupVersionKind)},
Labels: map[string]string{
api.ControllerRevisionObjectGenerationLabel: fmt.Sprintf("%d", metaObj.GetGeneration()),
api.ControllerRevisionObjectKindLabel: obj.GetObjectKind().GroupVersionKind().Kind,
api.ControllerRevisionObjectNameLabel: metaObj.GetName(),
api.ControllerRevisionObjectUIDLabel: string(metaObj.GetUID()),
api.ControllerRevisionObjectVersionLabel: obj.GetObjectKind().GroupVersionKind().Version,
},
},
Data: runtime.RawExtension{
Object: obj,
},
}, nil
}
func (h *revisionHandler) storeControllerRevision(vm *virtv1.VirtualMachine, object runtime.Object) (*appsv1.ControllerRevision, error) {
revision, err := CreateControllerRevision(vm, object)
if err != nil {
return nil, err
}
createdRevision, err := h.virtClient.AppsV1().ControllerRevisions(revision.Namespace).Create(
context.Background(), revision, metav1.CreateOptions{})
if err != nil {
if !errors.IsAlreadyExists(err) {
return nil, fmt.Errorf("failed to create ControllerRevision: %w", err)
}
// Grab the existing revision to check the data it contains
existingRevision, err := h.virtClient.AppsV1().ControllerRevisions(revision.Namespace).Get(
context.Background(), revision.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get ControllerRevision: %w", err)
}
equal, err := Compare(revision, existingRevision)
if err != nil {
return nil, err
}
if !equal {
return nil, fmt.Errorf("found existing ControllerRevision with unexpected data: %s", revision.Name)
}
return existingRevision, nil
}
return createdRevision, nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package upgrade
import (
"context"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
virtv1 "kubevirt.io/api/core/v1"
instancetypeapi "kubevirt.io/api/instancetype"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/instancetype/compatibility"
"kubevirt.io/kubevirt/pkg/instancetype/find"
"kubevirt.io/kubevirt/pkg/instancetype/revision"
)
type controllerRevisionFinder interface {
Find(types.NamespacedName) (*appsv1.ControllerRevision, error)
}
type upgrader struct {
controllerRevisionFinder controllerRevisionFinder
virtClient kubecli.KubevirtClient
}
func New(store cache.Store, virtClient kubecli.KubevirtClient) *upgrader {
return &upgrader{
controllerRevisionFinder: find.NewControllerRevisionFinder(store, virtClient),
virtClient: virtClient,
}
}
func (u *upgrader) Upgrade(vm *virtv1.VirtualMachine) error {
if vm.Spec.Instancetype == nil && vm.Spec.Preference == nil {
return nil
}
vmPatchSet := patch.New()
newInstancetypeCR, err := u.upgradeInstancetypeCR(vm, vmPatchSet)
if err != nil {
return err
}
newPreferenceCR, err := u.upgradePreferenceCR(vm, vmPatchSet)
if err != nil {
return err
}
if vmPatchSet.IsEmpty() {
return nil
}
patchPayload, err := vmPatchSet.GeneratePayload()
if err != nil {
return err
}
if _, err := u.virtClient.VirtualMachine(vm.Namespace).PatchStatus(
context.Background(), vm.Name, types.JSONPatchType, patchPayload, metav1.PatchOptions{}); err != nil {
return err
}
if newInstancetypeCR != nil {
if err := u.virtClient.AppsV1().ControllerRevisions(vm.Namespace).Delete(
context.Background(), vm.Status.InstancetypeRef.ControllerRevisionRef.Name, metav1.DeleteOptions{}); err != nil {
log.Log.Object(vm).Reason(err).Error("ignoring failure to delete ControllerRevision during stashed instance type object upgrade")
}
vm.Status.InstancetypeRef.ControllerRevisionRef.Name = newInstancetypeCR.Name
}
if newPreferenceCR != nil {
if err := u.virtClient.AppsV1().ControllerRevisions(vm.Namespace).Delete(
context.Background(), vm.Status.PreferenceRef.ControllerRevisionRef.Name, metav1.DeleteOptions{}); err != nil {
log.Log.Object(vm).Reason(err).Error("ignoring failure to delete ControllerRevision during stashed preference object upgrade")
}
vm.Status.PreferenceRef.ControllerRevisionRef.Name = newPreferenceCR.Name
}
log.Log.Object(vm).Info("instancetype.kubevirt.io ControllerRevisions upgrade successful")
return nil
}
func (u *upgrader) upgradeInstancetypeCR(vm *virtv1.VirtualMachine, vmPatchSet *patch.PatchSet) (*appsv1.ControllerRevision, error) {
if vm.Spec.Instancetype == nil || !revision.HasControllerRevisionRef(vm.Status.InstancetypeRef) {
return nil, nil
}
return u.upgradeControllerRevision(
vm, vm.Status.InstancetypeRef.ControllerRevisionRef.Name, "/status/instancetypeRef/controllerRevisionRef/name", vmPatchSet)
}
func (u *upgrader) upgradePreferenceCR(vm *virtv1.VirtualMachine, vmPatchSet *patch.PatchSet) (*appsv1.ControllerRevision, error) {
if vm.Spec.Preference == nil || !revision.HasControllerRevisionRef(vm.Status.PreferenceRef) {
return nil, nil
}
return u.upgradeControllerRevision(
vm, vm.Status.PreferenceRef.ControllerRevisionRef.Name, "/status/preferenceRef/controllerRevisionRef/name", vmPatchSet)
}
func (u *upgrader) upgradeControllerRevision(
vm *virtv1.VirtualMachine,
crName, jsonPath string,
vmPatchSet *patch.PatchSet,
) (*appsv1.ControllerRevision, error) {
original, err := u.controllerRevisionFinder.Find(types.NamespacedName{Namespace: vm.Namespace, Name: crName})
if err != nil {
return nil, err
}
// If the CR is already labeled with the latest version then skip
if IsObjectLatestVersion(original) {
return nil, nil
}
log.Log.Object(vm).Infof("upgrading instancetype.kubevirt.io ControllerRevision %s (%s)", crName, jsonPath)
upgradedCR := original.DeepCopy()
// Upgrade the stashed object to the latest version
err = compatibility.Decode(upgradedCR)
if err != nil {
return nil, err
}
newCR, err := revision.CreateControllerRevision(vm, upgradedCR.Data.Object)
if err != nil {
return nil, err
}
// Recreate the CR with the now upgraded runtime.Object
newCR, err = u.virtClient.AppsV1().ControllerRevisions(vm.Namespace).Create(context.Background(), newCR, metav1.CreateOptions{})
if err != nil {
return nil, err
}
// Add the patches to the VM patchset
vmPatchSet.AddOption(
patch.WithTest(jsonPath, upgradedCR.Name),
patch.WithReplace(jsonPath, newCR.Name),
)
return newCR, nil
}
func IsObjectLatestVersion(cr *appsv1.ControllerRevision) bool {
if version, ok := cr.GetLabels()[instancetypeapi.ControllerRevisionObjectVersionLabel]; ok {
return version == instancetypeapi.LatestVersion
}
return false
}
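// A minimal usage sketch (illustrative; the store and client are assumed to be
// wired up by the calling controller):
//
//	u := New(controllerRevisionStore, virtClient)
//	if err := u.Upgrade(vm); err != nil {
//	    // requeue; the VM status still references the old ControllerRevisions
//	}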
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package vm
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/instancetype/apply"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
"kubevirt.io/kubevirt/pkg/instancetype/find"
preferenceFind "kubevirt.io/kubevirt/pkg/instancetype/preference/find"
"kubevirt.io/kubevirt/pkg/instancetype/preference/requirements"
"kubevirt.io/kubevirt/pkg/instancetype/preference/validation"
)
type instancetypeFinder interface {
Find(*virtv1.VirtualMachine) (*v1beta1.VirtualMachineInstancetypeSpec, error)
}
type preferenceFinder interface {
FindPreference(*virtv1.VirtualMachine) (*v1beta1.VirtualMachinePreferenceSpec, error)
}
type requirementsChecker interface {
Check(*v1beta1.VirtualMachineInstancetypeSpec,
*v1beta1.VirtualMachinePreferenceSpec,
*virtv1.VirtualMachineInstanceSpec,
) (conflict.Conflicts, error)
}
type applyVMIHandler interface {
ApplyToVMI(
*k8sfield.Path,
*v1beta1.VirtualMachineInstancetypeSpec,
*v1beta1.VirtualMachinePreferenceSpec,
*virtv1.VirtualMachineInstanceSpec,
*metav1.ObjectMeta,
) conflict.Conflicts
}
type admitter struct {
instancetypeFinder
preferenceFinder
applyVMIHandler
requirementsChecker
}
func NewAdmitter(virtClient kubecli.KubevirtClient) *admitter {
return &admitter{
instancetypeFinder: find.NewSpecFinder(nil, nil, nil, virtClient),
preferenceFinder: preferenceFind.NewSpecFinder(nil, nil, nil, virtClient),
requirementsChecker: requirements.New(),
applyVMIHandler: apply.NewVMIApplier(),
}
}
func (a *admitter) ApplyToVM(vm *virtv1.VirtualMachine) (
*v1beta1.VirtualMachineInstancetypeSpec,
*v1beta1.VirtualMachinePreferenceSpec,
[]metav1.StatusCause,
) {
const ignoreFindFailureWarnFmt = "ignoring err %q when looking for %s"
instancetypeSpec, err := a.Find(vm)
if err != nil {
log.Log.Object(vm).Warningf(ignoreFindFailureWarnFmt, err, "instance type")
}
preferenceSpec, err := a.FindPreference(vm)
if err != nil {
log.Log.Object(vm).Warningf(ignoreFindFailureWarnFmt, err, "preference")
}
if instancetypeSpec == nil && preferenceSpec == nil {
return nil, nil, nil
}
if spreadConflict := validation.CheckSpreadCPUTopology(instancetypeSpec, preferenceSpec); spreadConflict != nil {
return nil, nil, spreadConflict.StatusCauses()
}
conflicts := a.ApplyToVMI(
k8sfield.NewPath("spec", "template", "spec"),
instancetypeSpec,
preferenceSpec,
&vm.Spec.Template.Spec,
&vm.Spec.Template.ObjectMeta,
)
if len(conflicts) > 0 {
return nil, nil, conflicts.StatusCauses()
}
return instancetypeSpec, preferenceSpec, nil
}
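// A minimal usage sketch (illustrative): resolving and applying the matchers
// during admission, surfacing any conflicts as status causes:
//
//	admitter := NewAdmitter(virtClient)
//	instancetypeSpec, preferenceSpec, causes := admitter.ApplyToVM(vm)
//	if len(causes) > 0 {
//	    // reject the request with the returned causes
//	}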
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
//nolint:dupl
package vm
import (
"fmt"
"net/http"
admissionv1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/instancetype/infer"
"kubevirt.io/kubevirt/pkg/instancetype/preference/apply"
preferenceFind "kubevirt.io/kubevirt/pkg/instancetype/preference/find"
webhookutils "kubevirt.io/kubevirt/pkg/util/webhooks"
)
type inferHandler interface {
Instancetype(vm *virtv1.VirtualMachine) error
Preference(vm *virtv1.VirtualMachine) error
}
type findPreferenceSpecHandler interface {
FindPreference(vm *virtv1.VirtualMachine) (*v1beta1.VirtualMachinePreferenceSpec, error)
}
type mutator struct {
inferHandler
findPreferenceSpecHandler
}
func NewMutator(virtClient kubecli.KubevirtClient) *mutator {
return &mutator{
inferHandler: infer.New(virtClient),
// TODO(lyarwood): Wire up informers for use here to speed up lookups
findPreferenceSpecHandler: preferenceFind.NewSpecFinder(nil, nil, nil, virtClient),
}
}
func (m *mutator) Mutate(vm, oldVM *virtv1.VirtualMachine, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
if response := m.validateMatchers(vm, oldVM, ar); response != nil {
return response
}
if response := m.inferMatchers(vm); response != nil {
return response
}
preferenceSpec, _ := m.FindPreference(vm)
mutateArch(vm, preferenceSpec)
mutateMachineType(vm, preferenceSpec)
mutateDataVolumeTemplates(vm, preferenceSpec)
return nil
}
func mutateArch(vm *virtv1.VirtualMachine, preferenceSpec *v1beta1.VirtualMachinePreferenceSpec) {
if vm.Spec.Template == nil {
return
}
apply.ApplyArchitecturePreferences(preferenceSpec, &vm.Spec.Template.Spec)
}
// NOTE(lyarwood): We mutate the preferred machine type value into the VM early
// ahead of existing default mutation code running in the main vm mutation
// webhook.
func mutateMachineType(vm *virtv1.VirtualMachine, preferenceSpec *v1beta1.VirtualMachinePreferenceSpec) {
if vm.Spec.Template == nil {
return
}
if machine := vm.Spec.Template.Spec.Domain.Machine; machine != nil && machine.Type != "" {
return
}
if preferenceSpec != nil && preferenceSpec.Machine != nil && preferenceSpec.Machine.PreferredMachineType != "" {
if vm.Spec.Template.Spec.Domain.Machine == nil {
vm.Spec.Template.Spec.Domain.Machine = &virtv1.Machine{}
}
vm.Spec.Template.Spec.Domain.Machine.Type = preferenceSpec.Machine.PreferredMachineType
}
}
// NOTE(lyarwood): We have to mutate any preferred storage class value into the
// DataVolumeTemplates within the VM as it's obviously too late to do this
// during VMI creation with the rest of the preferred preference values
func mutateDataVolumeTemplates(vm *virtv1.VirtualMachine, preferenceSpec *v1beta1.VirtualMachinePreferenceSpec) {
if preferenceSpec != nil && preferenceSpec.Volumes != nil && preferenceSpec.Volumes.PreferredStorageClassName != "" {
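// Note: dv is a copy of the template, but Spec.PVC and Spec.Storage are
// pointers, so the assignments below update the templates stored in the VM.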
for _, dv := range vm.Spec.DataVolumeTemplates {
if dv.Spec.PVC != nil && dv.Spec.PVC.StorageClassName == nil {
dv.Spec.PVC.StorageClassName = &preferenceSpec.Volumes.PreferredStorageClassName
}
if dv.Spec.Storage != nil && dv.Spec.Storage.StorageClassName == nil {
dv.Spec.Storage.StorageClassName = &preferenceSpec.Volumes.PreferredStorageClassName
}
}
}
}
func (m *mutator) validateMatchers(vm, oldVM *virtv1.VirtualMachine, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
// Validate updates to the {Instancetype,Preference}Matchers
if ar.Request.Operation == admissionv1.Update {
if causes := validateInstancetypeMatcherUpdate(vm.Spec.Instancetype, oldVM.Spec.Instancetype); len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
if causes := validatePreferenceMatcherUpdate(vm.Spec.Preference, oldVM.Spec.Preference); len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
}
// Validate the InstancetypeMatcher before proceeding; the schema check above isn't enough,
// as we need to ensure that at least one of the optional Name or InferFromVolume attributes is present.
if causes := validateInstancetypeMatcher(vm); len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
if causes := validatePreferenceMatcher(vm); len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
return nil
}
func validateInstancetypeMatcherUpdate(newInstancetypeMatcher, oldInstancetypeMatcher *virtv1.InstancetypeMatcher) []metav1.StatusCause {
// Allow updates introducing or removing the matchers
if oldInstancetypeMatcher == nil || newInstancetypeMatcher == nil {
return nil
}
if err := validateMatcherUpdate(oldInstancetypeMatcher, newInstancetypeMatcher); err != nil {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: err.Error(),
Field: k8sfield.NewPath("spec", "instancetype", "revisionName").String(),
}}
}
return nil
}
func validatePreferenceMatcherUpdate(newPreferenceMatcher, oldPreferenceMatcher *virtv1.PreferenceMatcher) []metav1.StatusCause {
// Allow updates introducing or removing the matchers
if oldPreferenceMatcher == nil || newPreferenceMatcher == nil {
return nil
}
if err := validateMatcherUpdate(oldPreferenceMatcher, newPreferenceMatcher); err != nil {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: err.Error(),
Field: k8sfield.NewPath("spec", "preference", "revisionName").String(),
}}
}
return nil
}
func validateMatcherUpdate(oldMatcher, newMatcher virtv1.Matcher) error {
// Do not check anything when the original matcher didn't have a revisionName, as this is likely the VM controller updating the matcher
if oldMatcher.GetRevisionName() == "" {
return nil
}
// If the matchers have changed ensure that the RevisionName is cleared when updating the Name
if !equality.Semantic.DeepEqual(newMatcher, oldMatcher) {
if oldMatcher.GetName() != newMatcher.GetName() && oldMatcher.GetRevisionName() == newMatcher.GetRevisionName() {
return fmt.Errorf("the Matcher Name has been updated without updating the RevisionName")
}
}
return nil
}
func validateInstancetypeMatcher(vm *virtv1.VirtualMachine) []metav1.StatusCause {
if vm.Spec.Instancetype == nil {
return nil
}
var causes []metav1.StatusCause
if vm.Spec.Instancetype.Name == "" && vm.Spec.Instancetype.InferFromVolume == "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotFound,
Message: "Either Name or InferFromVolume should be provided within the InstancetypeMatcher",
Field: k8sfield.NewPath("spec", "instancetype").String(),
})
}
if vm.Spec.Instancetype.InferFromVolume != "" {
if vm.Spec.Instancetype.Name != "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: "Name already set, should be cleared before setting inferFromVolume",
Field: k8sfield.NewPath("spec", "instancetype", "name").String(),
})
}
if vm.Spec.Instancetype.Kind != "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: "Kind already set, should be cleared before setting inferFromVolume",
Field: k8sfield.NewPath("spec", "instancetype", "kind").String(),
})
}
}
if vm.Spec.Instancetype.InferFromVolumeFailurePolicy != nil {
failurePolicy := *vm.Spec.Instancetype.InferFromVolumeFailurePolicy
if failurePolicy != virtv1.IgnoreInferFromVolumeFailure && failurePolicy != virtv1.RejectInferFromVolumeFailure {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Invalid value '%s' for InferFromVolumeFailurePolicy", failurePolicy),
Field: k8sfield.NewPath("spec", "instancetype", "inferFromVolumeFailurePolicy").String(),
})
}
}
return causes
}
func validatePreferenceMatcher(vm *virtv1.VirtualMachine) []metav1.StatusCause {
if vm.Spec.Preference == nil {
return nil
}
var causes []metav1.StatusCause
if vm.Spec.Preference.Name == "" && vm.Spec.Preference.InferFromVolume == "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotFound,
Message: "Either Name or InferFromVolume should be provided within the PreferenceMatcher",
Field: k8sfield.NewPath("spec", "preference").String(),
})
}
if vm.Spec.Preference.InferFromVolume != "" {
if vm.Spec.Preference.Name != "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: "Name already set, should be cleared before setting inferFromVolume",
Field: k8sfield.NewPath("spec", "preference", "name").String(),
})
}
if vm.Spec.Preference.Kind != "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: "Kind already set, should be cleared before setting inferFromVolume",
Field: k8sfield.NewPath("spec", "preference", "kind").String(),
})
}
}
if vm.Spec.Preference.InferFromVolumeFailurePolicy != nil {
failurePolicy := *vm.Spec.Preference.InferFromVolumeFailurePolicy
if failurePolicy != virtv1.IgnoreInferFromVolumeFailure && failurePolicy != virtv1.RejectInferFromVolumeFailure {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Invalid value '%s' for InferFromVolumeFailurePolicy", failurePolicy),
Field: k8sfield.NewPath("spec", "preference", "inferFromVolumeFailurePolicy").String(),
})
}
}
return causes
}
func (m *mutator) inferMatchers(vm *virtv1.VirtualMachine) *admissionv1.AdmissionResponse {
if err := m.inferHandler.Instancetype(vm); err != nil {
log.Log.Reason(err).Error("admission failed, unable to set default instancetype")
return &admissionv1.AdmissionResponse{
Result: &metav1.Status{
Message: err.Error(),
Code: http.StatusBadRequest,
},
}
}
if err := m.inferHandler.Preference(vm); err != nil {
log.Log.Reason(err).Error("admission failed, unable to set default preference")
return &admissionv1.AdmissionResponse{
Result: &metav1.Status{
Message: err.Error(),
Code: http.StatusBadRequest,
},
}
}
return nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*/
package vm
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
virtv1 "kubevirt.io/api/core/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
)
type admitterStub struct {
ApplyToVMFunc func(*virtv1.VirtualMachine) (
*v1beta1.VirtualMachineInstancetypeSpec,
*v1beta1.VirtualMachinePreferenceSpec,
[]metav1.StatusCause,
)
CheckFunc func(*v1beta1.VirtualMachineInstancetypeSpec,
*v1beta1.VirtualMachinePreferenceSpec,
*virtv1.VirtualMachineInstanceSpec,
) (conflict.Conflicts, error)
}
func NewAdmitterStub() *admitterStub {
return &admitterStub{
ApplyToVMFunc: func(*virtv1.VirtualMachine) (
*v1beta1.VirtualMachineInstancetypeSpec,
*v1beta1.VirtualMachinePreferenceSpec,
[]metav1.StatusCause,
) {
return nil, nil, nil
},
CheckFunc: func(*v1beta1.VirtualMachineInstancetypeSpec,
*v1beta1.VirtualMachinePreferenceSpec,
*virtv1.VirtualMachineInstanceSpec,
) (conflict.Conflicts, error) {
return nil, nil
},
}
}
func (m *admitterStub) ApplyToVM(vm *virtv1.VirtualMachine) (
*v1beta1.VirtualMachineInstancetypeSpec,
*v1beta1.VirtualMachinePreferenceSpec,
[]metav1.StatusCause,
) {
return m.ApplyToVMFunc(vm)
}
func (m *admitterStub) Check(
instancetypeSpec *v1beta1.VirtualMachineInstancetypeSpec,
preferenceSpec *v1beta1.VirtualMachinePreferenceSpec,
vmiSpec *virtv1.VirtualMachineInstanceSpec,
) (conflict.Conflicts, error) {
return m.CheckFunc(instancetypeSpec, preferenceSpec, vmiSpec)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package libvmi
import (
v1 "kubevirt.io/api/core/v1"
)
// WithAccessCredentialSSHPublicKey adds an AccessCredential that propagates the
// public keys found in secretName to the authorized_keys file of the user with
// name userName via the qemu-guest-agent.
func WithAccessCredentialSSHPublicKey(secretName, userName string) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.AccessCredentials = append(vmi.Spec.AccessCredentials, v1.AccessCredential{
SSHPublicKey: &v1.SSHPublicKeyAccessCredential{
Source: v1.SSHPublicKeyAccessCredentialSource{
Secret: &v1.AccessCredentialSecretSource{
SecretName: secretName,
},
},
PropagationMethod: v1.SSHPublicKeyAccessCredentialPropagationMethod{
QemuGuestAgent: &v1.QemuGuestAgentSSHPublicKeyAccessCredentialPropagation{
Users: []string{userName},
},
},
},
})
}
}
// WithAccessCredentialUserPassword adds an AccessCredential that propagates the
// user passwords found in secretName via the qemu-guest-agent.
func WithAccessCredentialUserPassword(secretName string) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.AccessCredentials = append(vmi.Spec.AccessCredentials, v1.AccessCredential{
UserPassword: &v1.UserPasswordAccessCredential{
Source: v1.UserPasswordAccessCredentialSource{
Secret: &v1.AccessCredentialSecretSource{
SecretName: secretName,
},
},
PropagationMethod: v1.UserPasswordAccessCredentialPropagationMethod{
QemuGuestAgent: &v1.QemuGuestAgentUserPasswordAccessCredentialPropagation{},
},
},
})
}
}
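// A minimal usage sketch (illustrative; the secret names are placeholders):
//
//	vmi := New(
//	    WithAccessCredentialSSHPublicKey("my-keys-secret", "fedora"),
//	    WithAccessCredentialUserPassword("my-password-secret"),
//	)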
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package libvmi
import v1 "kubevirt.io/api/core/v1"
func WithClock(clock v1.Clock) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Domain.Clock = &clock
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package libvmi
import (
"kubevirt.io/kubevirt/pkg/libvmi/cloudinit"
v1 "kubevirt.io/api/core/v1"
)
const CloudInitDiskName = "cloudinitdisk"
// WithCloudInitNoCloud adds cloud-init no-cloud sources.
func WithCloudInitNoCloud(opts ...cloudinit.NoCloudOption) Option {
return func(vmi *v1.VirtualMachineInstance) {
addDiskVolumeWithCloudInitNoCloud(vmi, CloudInitDiskName, v1.DiskBusVirtio)
volume := getVolume(vmi, CloudInitDiskName)
for _, f := range opts {
f(volume.CloudInitNoCloud)
}
}
}
// WithCloudInitConfigDrive adds cloud-init config-drive sources.
func WithCloudInitConfigDrive(opts ...cloudinit.ConfigDriveOption) Option {
return func(vmi *v1.VirtualMachineInstance) {
addDiskVolumeWithCloudInitConfigDrive(vmi, CloudInitDiskName, v1.DiskBusVirtio)
volume := getVolume(vmi, CloudInitDiskName)
for _, f := range opts {
f(volume.CloudInitConfigDrive)
}
}
}
func addDiskVolumeWithCloudInitConfigDrive(vmi *v1.VirtualMachineInstance, diskName string, bus v1.DiskBus) {
addDisk(vmi, newDisk(diskName, bus))
v := newVolume(diskName)
v.VolumeSource = v1.VolumeSource{CloudInitConfigDrive: &v1.CloudInitConfigDriveSource{}}
addVolume(vmi, v)
}
func addDiskVolumeWithCloudInitNoCloud(vmi *v1.VirtualMachineInstance, diskName string, bus v1.DiskBus) {
addDisk(vmi, newDisk(diskName, bus))
v := newVolume(diskName)
setCloudInitNoCloud(&v, &v1.CloudInitNoCloudSource{})
addVolume(vmi, v)
}
func setCloudInitNoCloud(volume *v1.Volume, source *v1.CloudInitNoCloudSource) {
volume.VolumeSource = v1.VolumeSource{CloudInitNoCloud: source}
}
func GetCloudInitVolume(vmi *v1.VirtualMachineInstance) *v1.Volume {
return getVolume(vmi, CloudInitDiskName)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package cloudinit
import (
"encoding/base64"
k8scorev1 "k8s.io/api/core/v1"
v1 "kubevirt.io/api/core/v1"
)
type NoCloudOption func(*v1.CloudInitNoCloudSource)
func WithNoCloudUserData(data string) NoCloudOption {
return func(source *v1.CloudInitNoCloudSource) {
source.UserData = data
}
}
func WithNoCloudEncodedUserData(data string) NoCloudOption {
return func(source *v1.CloudInitNoCloudSource) {
source.UserDataBase64 = base64.StdEncoding.EncodeToString([]byte(data))
}
}
func WithNoCloudUserDataSecretName(secretName string) NoCloudOption {
return func(source *v1.CloudInitNoCloudSource) {
source.UserDataSecretRef = &k8scorev1.LocalObjectReference{Name: secretName}
}
}
func WithNoCloudNetworkData(data string) NoCloudOption {
return func(source *v1.CloudInitNoCloudSource) {
source.NetworkData = data
}
}
func WithNoCloudEncodedNetworkData(data string) NoCloudOption {
return func(source *v1.CloudInitNoCloudSource) {
source.NetworkDataBase64 = base64.StdEncoding.EncodeToString([]byte(data))
}
}
func WithNoCloudNetworkDataSecretName(secretName string) NoCloudOption {
return func(source *v1.CloudInitNoCloudSource) {
source.NetworkDataSecretRef = &k8scorev1.LocalObjectReference{Name: secretName}
}
}
type ConfigDriveOption func(*v1.CloudInitConfigDriveSource)
func WithConfigDriveUserData(data string) ConfigDriveOption {
return func(source *v1.CloudInitConfigDriveSource) {
source.UserData = data
}
}
func WithConfigDriveEncodedUserData(data string) ConfigDriveOption {
return func(source *v1.CloudInitConfigDriveSource) {
source.UserDataBase64 = base64.StdEncoding.EncodeToString([]byte(data))
}
}
func WithConfigDriveNetworkData(data string) ConfigDriveOption {
return func(source *v1.CloudInitConfigDriveSource) {
source.NetworkData = data
}
}
func WithConfigDriveEncodedNetworkData(data string) ConfigDriveOption {
return func(source *v1.CloudInitConfigDriveSource) {
source.NetworkDataBase64 = base64.StdEncoding.EncodeToString([]byte(data))
}
}
func WithConfigDriveUserDataSecretName(secretName string) ConfigDriveOption {
return func(source *v1.CloudInitConfigDriveSource) {
source.UserDataSecretRef = &k8scorev1.LocalObjectReference{Name: secretName}
}
}
func WithConfigDriveNetworkDataSecretName(secretName string) ConfigDriveOption {
return func(source *v1.CloudInitConfigDriveSource) {
source.NetworkDataSecretRef = &k8scorev1.LocalObjectReference{Name: secretName}
}
}
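// A minimal usage sketch (illustrative): combining these options with the
// libvmi helpers to attach a no-cloud disk carrying user and network data:
//
//	vmi := libvmi.New(
//	    libvmi.WithCloudInitNoCloud(
//	        cloudinit.WithNoCloudUserData("#cloud-config\n"),
//	        cloudinit.WithNoCloudNetworkDataSecretName("net-secret"),
//	    ),
//	)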
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package libvmi
import (
k8sv1 "k8s.io/api/core/v1"
v1 "kubevirt.io/api/core/v1"
)
func WithSecretDisk(secretName, volumeName string) Option {
return WithLabelledSecretDisk(secretName, volumeName, "")
}
func WithSysprepSecret(volumeName, secretName string) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Volumes = append(vmi.Spec.Volumes, newSysprepVolume(volumeName, &v1.SysprepSource{
Secret: &k8sv1.LocalObjectReference{
Name: secretName,
},
}))
}
}
func WithLabelledSecretDisk(secretName, volumeName, label string) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Volumes = append(vmi.Spec.Volumes, newSecretVolume(secretName, volumeName, label))
vmi.Spec.Domain.Devices.Disks = append(vmi.Spec.Domain.Devices.Disks, v1.Disk{
Name: volumeName,
})
}
}
func WithConfigMapDisk(configMapName, volumeName string) Option {
return WithLabelledConfigMapDisk(configMapName, volumeName, "")
}
func WithSysprepConfigMap(volumeName, configMapName string) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Volumes = append(vmi.Spec.Volumes, newSysprepVolume(volumeName, &v1.SysprepSource{
ConfigMap: &k8sv1.LocalObjectReference{
Name: configMapName,
},
}))
}
}
func WithLabelledConfigMapDisk(configMapName, volumeName, label string) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Volumes = append(vmi.Spec.Volumes, newConfigMapVolume(configMapName, volumeName, label))
vmi.Spec.Domain.Devices.Disks = append(vmi.Spec.Domain.Devices.Disks, v1.Disk{
Name: volumeName,
})
}
}
func WithServiceAccountDisk(name string) Option {
return func(vmi *v1.VirtualMachineInstance) {
const volumeSuffix = "-disk"
vmi.Spec.Volumes = append(vmi.Spec.Volumes, newServiceAccountVolume(name, name+volumeSuffix))
vmi.Spec.Domain.Devices.Disks = append(vmi.Spec.Domain.Devices.Disks, v1.Disk{
Name: name + volumeSuffix,
})
}
}
func WithDownwardAPIDisk(name string) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Volumes = append(vmi.Spec.Volumes, newDownwardAPIVolume(name))
vmi.Spec.Domain.Devices.Disks = append(vmi.Spec.Domain.Devices.Disks, v1.Disk{
Name: name,
})
}
}
func WithConfigMapFs(configMapName, volumeName string) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Volumes = append(vmi.Spec.Volumes, newConfigMapVolume(configMapName, volumeName, ""))
vmi.Spec.Domain.Devices.Filesystems = append(vmi.Spec.Domain.Devices.Filesystems, newVirtiofsFilesystem(volumeName))
}
}
func WithSecretFs(secretName, volumeName string) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Volumes = append(vmi.Spec.Volumes, newSecretVolume(secretName, volumeName, ""))
vmi.Spec.Domain.Devices.Filesystems = append(vmi.Spec.Domain.Devices.Filesystems, newVirtiofsFilesystem(volumeName))
}
}
func WithServiceAccountFs(serviceAccountName, volumeName string) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Volumes = append(vmi.Spec.Volumes, newServiceAccountVolume(serviceAccountName, volumeName))
vmi.Spec.Domain.Devices.Filesystems = append(vmi.Spec.Domain.Devices.Filesystems, newVirtiofsFilesystem(volumeName))
}
}
func WithDownwardAPIFs(name string) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Volumes = append(vmi.Spec.Volumes, newDownwardAPIVolume(name))
vmi.Spec.Domain.Devices.Filesystems = append(vmi.Spec.Domain.Devices.Filesystems, newVirtiofsFilesystem(name))
}
}
func WithLogSerialConsole(enable bool) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Domain.Devices.LogSerialConsole = &enable
}
}
func newSecretVolume(secretName, volumeName, label string) v1.Volume {
return v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: secretName,
VolumeLabel: label,
},
},
}
}
func newSysprepVolume(volumeName string, source *v1.SysprepSource) v1.Volume {
return v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Sysprep: source,
},
}
}
func newConfigMapVolume(configMapName, volumeName, label string) v1.Volume {
return v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: k8sv1.LocalObjectReference{
Name: configMapName,
},
VolumeLabel: label,
},
},
}
}
func newServiceAccountVolume(serviceAccountName, volumeName string) v1.Volume {
return v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
ServiceAccount: &v1.ServiceAccountVolumeSource{
ServiceAccountName: serviceAccountName,
},
},
}
}
func newDownwardAPIVolume(name string) v1.Volume {
return v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
DownwardAPI: &v1.DownwardAPIVolumeSource{
Fields: []k8sv1.DownwardAPIVolumeFile{
{
Path: "labels",
FieldRef: &k8sv1.ObjectFieldSelector{
FieldPath: "metadata.labels",
},
},
},
VolumeLabel: "",
},
},
}
}
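// Illustrative sketch, not part of the original source: composing the
// ConfigMap/Secret/ServiceAccount disk options above with New (defined
// elsewhere in this package). The resource names are hypothetical.
func exampleVMIWithConfigVolumes() *v1.VirtualMachineInstance {
	return New(
		WithConfigMapDisk("app-config", "config-volume"),
		WithSecretDisk("app-credentials", "secret-volume"),
		WithServiceAccountDisk("default"),
	)
}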
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package libvmi
import (
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
v1 "kubevirt.io/api/core/v1"
)
func WithCPUCount(cores, threads, sockets uint32) Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.CPU == nil {
vmi.Spec.Domain.CPU = &v1.CPU{}
}
vmi.Spec.Domain.CPU.Cores = cores
vmi.Spec.Domain.CPU.Threads = threads
vmi.Spec.Domain.CPU.Sockets = sockets
}
}
func WithCPUModel(model string) Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.CPU == nil {
vmi.Spec.Domain.CPU = &v1.CPU{}
}
vmi.Spec.Domain.CPU.Model = model
}
}
func WithCPUFeature(featureName, policy string) Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.CPU == nil {
vmi.Spec.Domain.CPU = &v1.CPU{}
}
vmi.Spec.Domain.CPU.Features = append(vmi.Spec.Domain.CPU.Features, v1.CPUFeature{
Name: featureName,
Policy: policy,
})
}
}
func WithDedicatedCPUPlacement() Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.CPU == nil {
vmi.Spec.Domain.CPU = &v1.CPU{}
}
vmi.Spec.Domain.CPU.DedicatedCPUPlacement = true
}
}
func WithRealtimeMask(realtimeMask string) Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.CPU == nil {
vmi.Spec.Domain.CPU = &v1.CPU{}
}
vmi.Spec.Domain.CPU.Realtime = &v1.Realtime{Mask: realtimeMask}
}
}
func WithNUMAGuestMappingPassthrough() Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.CPU == nil {
vmi.Spec.Domain.CPU = &v1.CPU{}
}
vmi.Spec.Domain.CPU.NUMA = &v1.NUMA{GuestMappingPassthrough: &v1.NUMAGuestMappingPassthrough{}}
}
}
func WithArchitecture(arch string) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Architecture = arch
}
}
// WithResourceCPU specifies the VMI CPU request.
//
// Deprecated: Use WithCPURequest instead.
func WithResourceCPU(value string) Option {
return WithCPURequest(value)
}
// WithLimitCPU specifies the VMI CPU limit.
//
// Deprecated: Use WithCPULimit instead.
func WithLimitCPU(value string) Option {
return WithCPULimit(value)
}
// WithCPURequest specifies the VMI CPU request.
func WithCPURequest(value string) Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.Resources.Requests == nil {
vmi.Spec.Domain.Resources.Requests = k8sv1.ResourceList{}
}
vmi.Spec.Domain.Resources.Requests[k8sv1.ResourceCPU] = resource.MustParse(value)
}
}
// WithCPULimit specifies the VMI CPU limit.
func WithCPULimit(value string) Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.Resources.Limits == nil {
vmi.Spec.Domain.Resources.Limits = k8sv1.ResourceList{}
}
vmi.Spec.Domain.Resources.Limits[k8sv1.ResourceCPU] = resource.MustParse(value)
}
}
func WithIsolateEmulatorThread() Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.CPU == nil {
vmi.Spec.Domain.CPU = &v1.CPU{}
}
vmi.Spec.Domain.CPU.IsolateEmulatorThread = true
}
}
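// Illustrative sketch, not part of the original source: CPU topology and CPU
// resource options compose like any other Option; the values are hypothetical.
func exampleVMIWithCPU() *v1.VirtualMachineInstance {
	return New(
		WithCPUCount(2, 1, 1), // 2 cores, 1 thread, 1 socket
		WithCPURequest("500m"),
		WithCPULimit("1"),
	)
}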
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package libvmi
import (
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/pointer"
)
// WithTablet adds a tablet device with the given name and bus.
func WithTablet(name string, bus v1.InputBus) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Domain.Devices.Inputs = append(vmi.Spec.Domain.Devices.Inputs,
v1.Input{
Name: name,
Bus: bus,
Type: v1.InputTypeTablet,
},
)
}
}
func WithAutoattachGraphicsDevice(enable bool) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Domain.Devices.AutoattachGraphicsDevice = &enable
}
}
// WithRng adds an `rng` device to the vmi devices.
func WithRng() Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Domain.Devices.Rng = &v1.Rng{}
}
}
// WithWatchdog adds a watchdog to the vmi devices.
func WithWatchdog(action v1.WatchdogAction, arch string) Option {
return func(vmi *v1.VirtualMachineInstance) {
watchdog := &v1.Watchdog{
Name: "watchdog",
}
if arch == "s390x" {
watchdog.WatchdogDevice.Diag288 = &v1.Diag288Watchdog{Action: action}
} else {
watchdog.WatchdogDevice.I6300ESB = &v1.I6300ESBWatchdog{Action: action}
}
vmi.Spec.Domain.Devices.Watchdog = watchdog
}
}
func WithDownwardMetricsVolume(volumeName string) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Volumes = append(vmi.Spec.Volumes, v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
DownwardMetrics: &v1.DownwardMetricsVolumeSource{},
},
})
vmi.Spec.Domain.Devices.Disks = append(vmi.Spec.Domain.Devices.Disks, v1.Disk{
Name: volumeName,
DiskDevice: v1.DiskDevice{
Disk: &v1.DiskTarget{
Bus: v1.DiskBusVirtio,
},
},
})
}
}
func WithDownwardMetricsChannel() Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Domain.Devices.DownwardMetrics = &v1.DownwardMetrics{}
}
}
func WithoutSerialConsole() Option {
return func(vmi *v1.VirtualMachineInstance) {
enabled := false
vmi.Spec.Domain.Devices.AutoattachSerialConsole = &enabled
}
}
func WithTPM(persistent bool) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Domain.Devices.TPM = &v1.TPMDevice{
Persistent: pointer.P(persistent),
}
}
}
func WithVideo(videoType string) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Domain.Devices.Video = &v1.VideoDevice{
Type: videoType,
}
}
}
// WithPanicDevice adds a panic device with the given model
func WithPanicDevice(model v1.PanicDeviceModel) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Domain.Devices.PanicDevices = append(vmi.Spec.Domain.Devices.PanicDevices, v1.PanicDevice{Model: &model})
}
}
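// Illustrative sketch, not part of the original source: wiring a few of the
// device options above into a VMI. The device name and watchdog action are
// hypothetical.
func exampleVMIWithDevices() *v1.VirtualMachineInstance {
	return New(
		WithTablet("tablet0", v1.InputBusUSB),
		WithRng(),
		WithWatchdog(v1.WatchdogActionReset, "amd64"), // i6300esb on non-s390x
		WithTPM(false),
	)
}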
/*
Copyright The KubeVirt Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package libvmi
import (
"k8s.io/apimachinery/pkg/types"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/pointer"
)
// WithUefi configures EFI bootloader and SecureBoot.
func WithUefi(secureBoot bool) Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.Firmware == nil {
vmi.Spec.Domain.Firmware = &v1.Firmware{}
}
if vmi.Spec.Domain.Firmware.Bootloader == nil {
vmi.Spec.Domain.Firmware.Bootloader = &v1.Bootloader{}
}
if vmi.Spec.Domain.Firmware.Bootloader.EFI == nil {
vmi.Spec.Domain.Firmware.Bootloader.EFI = &v1.EFI{}
}
vmi.Spec.Domain.Firmware.Bootloader.EFI.SecureBoot = pointer.P(secureBoot)
// SecureBoot requires SMM to be enabled
if secureBoot {
if vmi.Spec.Domain.Features == nil {
vmi.Spec.Domain.Features = &v1.Features{}
}
if vmi.Spec.Domain.Features.SMM == nil {
vmi.Spec.Domain.Features.SMM = &v1.FeatureState{}
}
vmi.Spec.Domain.Features.SMM.Enabled = pointer.P(secureBoot)
}
}
}
func WithKernelBootContainer(imageName string) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Domain.Firmware = &v1.Firmware{
KernelBoot: &v1.KernelBoot{
Container: &v1.KernelBootContainer{
Image: imageName,
},
},
}
}
}
func WithKernelBootContainerImagePullSecret(imagePullSecret string) Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.Firmware == nil {
vmi.Spec.Domain.Firmware = &v1.Firmware{}
}
if vmi.Spec.Domain.Firmware.KernelBoot == nil {
vmi.Spec.Domain.Firmware.KernelBoot = &v1.KernelBoot{}
}
if vmi.Spec.Domain.Firmware.KernelBoot.Container == nil {
vmi.Spec.Domain.Firmware.KernelBoot.Container = &v1.KernelBootContainer{}
}
vmi.Spec.Domain.Firmware.KernelBoot.Container.ImagePullSecret = imagePullSecret
}
}
func WithFirmwareUUID(uid types.UID) Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.Firmware == nil {
vmi.Spec.Domain.Firmware = &v1.Firmware{}
}
vmi.Spec.Domain.Firmware.UUID = uid
}
}
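// Illustrative sketch, not part of the original source: WithUefi(true) sets
// both EFI SecureBoot and the SMM feature it depends on, as implemented above.
// The UUID value is hypothetical.
func exampleVMIWithUefiSecureBoot() *v1.VirtualMachineInstance {
	return New(
		WithUefi(true),
		WithFirmwareUUID("11111111-2222-3333-4444-555555555555"),
	)
}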
/*
Copyright The KubeVirt Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package libvmi
import v1 "kubevirt.io/api/core/v1"
// WithTerminationGracePeriod specifies the termination grace period in seconds.
func WithTerminationGracePeriod(seconds int64) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.TerminationGracePeriodSeconds = &seconds
}
}
func WithEvictionStrategy(evictionStrategy v1.EvictionStrategy) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.EvictionStrategy = &evictionStrategy
}
}
func WithStartStrategy(startStrategy v1.StartStrategy) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.StartStrategy = &startStrategy
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package libvmi
import (
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
v1 "kubevirt.io/api/core/v1"
)
func WithHugepages(pageSize string) Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.Memory == nil {
vmi.Spec.Domain.Memory = &v1.Memory{}
}
vmi.Spec.Domain.Memory.Hugepages = &v1.Hugepages{PageSize: pageSize}
}
}
func WithGuestMemory(memory string) Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.Memory == nil {
vmi.Spec.Domain.Memory = &v1.Memory{}
}
quantity := resource.MustParse(memory)
vmi.Spec.Domain.Memory.Guest = &quantity
}
}
func WithMaxGuest(memory string) Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.Memory == nil {
vmi.Spec.Domain.Memory = &v1.Memory{}
}
quantity := resource.MustParse(memory)
vmi.Spec.Domain.Memory.MaxGuest = &quantity
}
}
// WithResourceMemory specifies the VMI memory request.
//
// Deprecated: Use WithMemoryRequest instead.
func WithResourceMemory(value string) Option {
return WithMemoryRequest(value)
}
// WithLimitMemory specifies the VMI memory limit.
//
// Deprecated: Use WithMemoryLimit instead.
func WithLimitMemory(value string) Option {
return WithMemoryLimit(value)
}
// WithMemoryRequest specifies the VMI memory request.
func WithMemoryRequest(value string) Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.Resources.Requests == nil {
vmi.Spec.Domain.Resources.Requests = k8sv1.ResourceList{}
}
vmi.Spec.Domain.Resources.Requests[k8sv1.ResourceMemory] = resource.MustParse(value)
}
}
// WithMemoryLimit specifies the VMI memory limit.
func WithMemoryLimit(value string) Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.Resources.Limits == nil {
vmi.Spec.Domain.Resources.Limits = k8sv1.ResourceList{}
}
vmi.Spec.Domain.Resources.Limits[k8sv1.ResourceMemory] = resource.MustParse(value)
}
}
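// Illustrative sketch, not part of the original source: guest memory, hotplug
// headroom, and the pod-level memory request composed together. The
// quantities are hypothetical.
func exampleVMIWithMemory() *v1.VirtualMachineInstance {
	return New(
		WithGuestMemory("1Gi"),
		WithMaxGuest("4Gi"),
		WithMemoryRequest("1Gi"),
	)
}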
/*
Copyright The KubeVirt Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package libvmi
import (
"k8s.io/apimachinery/pkg/types"
v1 "kubevirt.io/api/core/v1"
)
// WithLabel sets a label with specified value
func WithLabel(key, value string) Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Labels == nil {
vmi.Labels = map[string]string{}
}
vmi.Labels[key] = value
}
}
// WithAnnotation adds an annotation with specified value
func WithAnnotation(key, value string) Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Annotations == nil {
vmi.Annotations = map[string]string{}
}
vmi.Annotations[key] = value
}
}
func WithName(name string) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Name = name
}
}
func WithNamespace(namespace string) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Namespace = namespace
}
}
func WithUID(uid types.UID) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.ObjectMeta.UID = uid
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package libvmi
import (
kvirtv1 "kubevirt.io/api/core/v1"
)
// WithInterface adds a Domain Device Interface.
func WithInterface(iface kvirtv1.Interface) Option {
return func(vmi *kvirtv1.VirtualMachineInstance) {
vmi.Spec.Domain.Devices.Interfaces = append(
vmi.Spec.Domain.Devices.Interfaces, iface,
)
}
}
// WithNetwork adds a network object.
func WithNetwork(network *kvirtv1.Network) Option {
return func(vmi *kvirtv1.VirtualMachineInstance) {
vmi.Spec.Networks = append(vmi.Spec.Networks, *network)
}
}
func WithPasstInterfaceWithPort() Option {
	return WithInterface(InterfaceWithPasstBindingPlugin(kvirtv1.Port{Port: 1234, Protocol: "TCP"}))
}
// InterfaceDeviceWithMasqueradeBinding returns an Interface named "default" with masquerade binding.
func InterfaceDeviceWithMasqueradeBinding(ports ...kvirtv1.Port) kvirtv1.Interface {
return kvirtv1.Interface{
Name: kvirtv1.DefaultPodNetwork().Name,
InterfaceBindingMethod: kvirtv1.InterfaceBindingMethod{
Masquerade: &kvirtv1.InterfaceMasquerade{},
},
Ports: ports,
}
}
// InterfaceDeviceWithBridgeBinding returns an Interface with bridge binding.
func InterfaceDeviceWithBridgeBinding(name string) kvirtv1.Interface {
return kvirtv1.Interface{
Name: name,
InterfaceBindingMethod: kvirtv1.InterfaceBindingMethod{
Bridge: &kvirtv1.InterfaceBridge{},
},
}
}
// InterfaceDeviceWithSRIOVBinding returns an Interface with SRIOV binding.
func InterfaceDeviceWithSRIOVBinding(name string) kvirtv1.Interface {
return kvirtv1.Interface{
Name: name,
InterfaceBindingMethod: kvirtv1.InterfaceBindingMethod{
SRIOV: &kvirtv1.InterfaceSRIOV{},
},
}
}
// InterfaceWithPasstBindingPlugin returns an Interface named "default" with the passt binding plugin.
func InterfaceWithPasstBindingPlugin(ports ...kvirtv1.Port) kvirtv1.Interface {
const passtBindingName = "passt"
return kvirtv1.Interface{
Name: kvirtv1.DefaultPodNetwork().Name,
Binding: &kvirtv1.PluginBinding{Name: passtBindingName},
Ports: ports,
}
}
// InterfaceWithMacvtapBindingPlugin returns an Interface with the given name and the "macvtap" binding plugin.
func InterfaceWithMacvtapBindingPlugin(name string) *kvirtv1.Interface {
const macvtapBindingName = "macvtap"
return &kvirtv1.Interface{
Name: name,
Binding: &kvirtv1.PluginBinding{Name: macvtapBindingName},
}
}
func InterfaceWithBindingPlugin(name string, binding kvirtv1.PluginBinding, ports ...kvirtv1.Port) kvirtv1.Interface {
return kvirtv1.Interface{
Name: name,
Binding: &binding,
Ports: ports,
}
}
// InterfaceWithMac decorates an existing Interface with a MAC address.
func InterfaceWithMac(iface *kvirtv1.Interface, macAddress string) *kvirtv1.Interface {
iface.MacAddress = macAddress
return iface
}
// MultusNetwork returns a Network with the given name, associated with the given NetworkAttachmentDefinition (nad)
func MultusNetwork(name, nadName string) *kvirtv1.Network {
return &kvirtv1.Network{
Name: name,
NetworkSource: kvirtv1.NetworkSource{
Multus: &kvirtv1.MultusNetwork{
NetworkName: nadName,
},
},
}
}
// WithHostname sets the hostname parameter.
func WithHostname(hostname string) Option {
return func(vmi *kvirtv1.VirtualMachineInstance) {
vmi.Spec.Hostname = hostname
}
}
// WithSubdomain sets the subdomain parameter.
func WithSubdomain(subdomain string) Option {
return func(vmi *kvirtv1.VirtualMachineInstance) {
vmi.Spec.Subdomain = subdomain
}
}
// WithAutoAttachPodInterface sets the autoattachPodInterface parameter.
func WithAutoAttachPodInterface(enabled bool) Option {
return func(vmi *kvirtv1.VirtualMachineInstance) {
vmi.Spec.Domain.Devices.AutoattachPodInterface = &enabled
}
}
// WithNetworkInterfaceMultiQueue sets the networkInterfaceMultiQueue field.
func WithNetworkInterfaceMultiQueue(enabled bool) Option {
return func(vmi *kvirtv1.VirtualMachineInstance) {
vmi.Spec.Domain.Devices.NetworkInterfaceMultiQueue = &enabled
}
}
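// Illustrative sketch, not part of the original source: attaching the default
// pod network with a masquerade-bound interface exposing one port. The port
// number is hypothetical.
func exampleVMIWithMasquerade() *kvirtv1.VirtualMachineInstance {
	return New(
		WithInterface(InterfaceDeviceWithMasqueradeBinding(kvirtv1.Port{Port: 80, Protocol: "TCP"})),
		WithNetwork(kvirtv1.DefaultPodNetwork()),
	)
}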
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package libvmi
import (
k8sv1 "k8s.io/api/core/v1"
v1 "kubevirt.io/api/core/v1"
)
// WithNodeSelectorFor ensures that the VMI gets scheduled on the specified node
func WithNodeSelectorFor(nodeName string) Option {
return WithNodeSelector(k8sv1.LabelHostname, nodeName)
}
// WithNodeSelector ensures that the VMI gets scheduled on a node with specified key/value label
func WithNodeSelector(key, value string) Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.NodeSelector == nil {
vmi.Spec.NodeSelector = map[string]string{}
}
vmi.Spec.NodeSelector[key] = value
}
}
func WithNodeAffinityFor(nodeName string) Option {
return WithNodeAffinityForLabel(k8sv1.LabelHostname, nodeName)
}
func WithToleration(toleration k8sv1.Toleration) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Tolerations = append(vmi.Spec.Tolerations, toleration)
}
}
func WithNodeAffinityForLabel(nodeLabelKey, nodeLabelValue string) Option {
return func(vmi *v1.VirtualMachineInstance) {
nodeSelectorTerm := k8sv1.NodeSelectorTerm{
MatchExpressions: []k8sv1.NodeSelectorRequirement{
{Key: nodeLabelKey, Operator: k8sv1.NodeSelectorOpIn, Values: []string{nodeLabelValue}},
},
}
if vmi.Spec.Affinity == nil {
vmi.Spec.Affinity = &k8sv1.Affinity{}
}
if vmi.Spec.Affinity.NodeAffinity == nil {
vmi.Spec.Affinity.NodeAffinity = &k8sv1.NodeAffinity{}
}
if vmi.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
vmi.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &k8sv1.NodeSelector{}
}
if vmi.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms == nil {
vmi.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = []k8sv1.NodeSelectorTerm{}
}
vmi.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(
vmi.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms,
nodeSelectorTerm,
)
}
}
func WithPreferredPodAffinity(term k8sv1.WeightedPodAffinityTerm) Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Affinity == nil {
vmi.Spec.Affinity = &k8sv1.Affinity{}
}
if vmi.Spec.Affinity.PodAffinity == nil {
vmi.Spec.Affinity.PodAffinity = &k8sv1.PodAffinity{}
}
vmi.Spec.Affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
vmi.Spec.Affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution, term,
)
}
}
func WithPreferredNodeAffinity(term k8sv1.PreferredSchedulingTerm) Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Affinity == nil {
vmi.Spec.Affinity = &k8sv1.Affinity{}
}
if vmi.Spec.Affinity.NodeAffinity == nil {
vmi.Spec.Affinity.NodeAffinity = &k8sv1.NodeAffinity{}
}
vmi.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
vmi.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
term,
)
}
}
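// Illustrative sketch, not part of the original source: pinning a VMI to a
// node by hostname label and tolerating a taint. The node name and taint key
// are hypothetical.
func exampleVMIPinnedToNode() *v1.VirtualMachineInstance {
	return New(
		WithNodeSelectorFor("node01"),
		WithToleration(k8sv1.Toleration{
			Key:      "dedicated",
			Operator: k8sv1.TolerationOpExists,
			Effect:   k8sv1.TaintEffectNoSchedule,
		}),
	)
}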
/*
Copyright The KubeVirt Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package libvmi
import v1 "kubevirt.io/api/core/v1"
// WithSEV adds `launchSecurity` configured for plain SEV, SEV-ES, or SEV-SNP,
// depending on the flags (SEV-ES takes precedence over SEV-SNP).
func WithSEV(isESEnabled, isSNPEnabled bool) Option {
if isESEnabled {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Domain.LaunchSecurity = &v1.LaunchSecurity{
SEV: &v1.SEV{
Policy: &v1.SEVPolicy{
EncryptedState: &isESEnabled,
},
},
}
}
} else if isSNPEnabled {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Domain.LaunchSecurity = &v1.LaunchSecurity{
SNP: &v1.SEVSNP{},
}
}
}
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Domain.LaunchSecurity = &v1.LaunchSecurity{
SEV: &v1.SEV{},
}
}
}
func WithSEVAttestation() Option {
return func(vmi *v1.VirtualMachineInstance) {
startStrategy := v1.StartStrategyPaused
vmi.Spec.StartStrategy = &startStrategy
if vmi.Spec.Domain.LaunchSecurity == nil {
vmi.Spec.Domain.LaunchSecurity = &v1.LaunchSecurity{}
}
if vmi.Spec.Domain.LaunchSecurity.SEV == nil {
vmi.Spec.Domain.LaunchSecurity.SEV = &v1.SEV{}
}
vmi.Spec.Domain.LaunchSecurity.SEV.Attestation = &v1.SEVAttestation{}
}
}
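// Illustrative sketch, not part of the original source: a plain SEV VMI that
// starts paused for attestation, combining the two options above.
func exampleSEVVMIWithAttestation() *v1.VirtualMachineInstance {
	return New(
		WithSEV(false, false), // plain SEV, neither SEV-ES nor SEV-SNP
		WithSEVAttestation(),
	)
}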
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package libvmi
import (
"fmt"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/pointer"
)
const (
defaultDiskSize = "1Gi"
)
// WithContainerDisk specifies the disk name and the name of the container image to be used.
func WithContainerDisk(diskName, imageName string, options ...DiskOption) Option {
return func(vmi *v1.VirtualMachineInstance) {
addDisk(vmi, newDisk(diskName, v1.DiskBusVirtio, options...))
addVolume(vmi, newContainerVolume(diskName, imageName, ""))
}
}
// WithContainerDiskAndPullPolicy specifies the disk name, the name of the container image and Pull Policy to be used.
func WithContainerDiskAndPullPolicy(diskName, imageName string, imagePullPolicy k8sv1.PullPolicy, diskOpts ...DiskOption) Option {
return func(vmi *v1.VirtualMachineInstance) {
addDisk(vmi, newDisk(diskName, v1.DiskBusVirtio, diskOpts...))
addVolume(vmi, newContainerVolume(diskName, imageName, imagePullPolicy))
}
}
// WithContainerSATADisk specifies the disk name and the name of the container image to be used.
func WithContainerSATADisk(diskName, imageName string, diskOpts ...DiskOption) Option {
return func(vmi *v1.VirtualMachineInstance) {
addDisk(vmi, newDisk(diskName, v1.DiskBusSATA, diskOpts...))
addVolume(vmi, newContainerVolume(diskName, imageName, ""))
}
}
// WithPersistentVolumeClaim specifies the name of the PersistentVolumeClaim to be used.
func WithPersistentVolumeClaim(diskName, pvcName string, diskOpts ...DiskOption) Option {
return func(vmi *v1.VirtualMachineInstance) {
addDisk(vmi, newDisk(diskName, v1.DiskBusVirtio, diskOpts...))
addVolume(vmi, newPersistentVolumeClaimVolume(diskName, pvcName, false))
}
}
// WithHotplugPersistentVolumeClaim specifies the name of the hotpluggable PersistentVolumeClaim to be used.
func WithHotplugPersistentVolumeClaim(diskName, pvcName string, diskOpts ...DiskOption) Option {
return func(vmi *v1.VirtualMachineInstance) {
addDisk(vmi, newDisk(diskName, v1.DiskBusVirtio, diskOpts...))
addVolume(vmi, newPersistentVolumeClaimVolume(diskName, pvcName, true))
}
}
// WithEphemeralPersistentVolumeClaim specifies the name of the Ephemeral.PersistentVolumeClaim to be used.
func WithEphemeralPersistentVolumeClaim(diskName, pvcName string, diskOpts ...DiskOption) Option {
return func(vmi *v1.VirtualMachineInstance) {
addDisk(vmi, newDisk(diskName, v1.DiskBusSATA, diskOpts...))
addVolume(vmi, newEphemeralPersistentVolumeClaimVolume(diskName, pvcName))
}
}
// WithDataVolume specifies the name of the DataVolume to be used.
func WithDataVolume(diskName, pvcName string, diskOpts ...DiskOption) Option {
return func(vmi *v1.VirtualMachineInstance) {
addDisk(vmi, newDisk(diskName, v1.DiskBusVirtio, diskOpts...))
addVolume(vmi, newDataVolume(diskName, pvcName, false))
}
}
// WithHotplugDataVolume specifies the name of the hotpluggable DataVolume to be used.
func WithHotplugDataVolume(diskName, pvcName string, diskOpts ...DiskOption) Option {
return func(vmi *v1.VirtualMachineInstance) {
addDisk(vmi, newDisk(diskName, v1.DiskBusSCSI, diskOpts...))
addVolume(vmi, newDataVolume(diskName, pvcName, true))
}
}
// WithEmptyDisk specifies the name of the EmptyDisk to be used.
func WithEmptyDisk(diskName string, bus v1.DiskBus, capacity resource.Quantity, diskOpts ...DiskOption) Option {
return func(vmi *v1.VirtualMachineInstance) {
addDisk(vmi, newDisk(diskName, bus, diskOpts...))
addVolume(vmi, newEmptyDisk(diskName, capacity))
}
}
// WithCDRom specifies a CDRom drive backed by a PVC to be used.
func WithCDRom(cdRomName string, bus v1.DiskBus, claimName string) Option {
return WithCDRomAndVolume(bus, newPersistentVolumeClaimVolume(cdRomName, claimName, false))
}
// WithCDRomAndVolume specifies a CDRom drive backed by given volume and given bus.
func WithCDRomAndVolume(bus v1.DiskBus, volume v1.Volume) Option {
return func(vmi *v1.VirtualMachineInstance) {
addDisk(vmi, newCDRom(volume.Name, bus))
addVolume(vmi, volume)
}
}
// WithEmptyCDRom specifies a CDRom drive with no volume
func WithEmptyCDRom(bus v1.DiskBus, name string) Option {
return func(vmi *v1.VirtualMachineInstance) {
addDisk(vmi, newCDRom(name, bus))
}
}
// WithEphemeralCDRom specifies a CDRom drive to be used.
func WithEphemeralCDRom(cdRomName string, bus v1.DiskBus, claimName string) Option {
return func(vmi *v1.VirtualMachineInstance) {
addDisk(vmi, newCDRom(cdRomName, bus))
addVolume(vmi, newContainerVolume(cdRomName, claimName, ""))
}
}
// WithFilesystemPVC specifies a filesystem backed by a PVC to be used.
func WithFilesystemPVC(claimName string) Option {
return func(vmi *v1.VirtualMachineInstance) {
addFilesystem(vmi, newVirtiofsFilesystem(claimName))
addVolume(vmi, newPersistentVolumeClaimVolume(claimName, claimName, false))
}
}
// WithFilesystemDV specifies a filesystem backed by a DV to be used.
func WithFilesystemDV(dataVolumeName string) Option {
return func(vmi *v1.VirtualMachineInstance) {
addFilesystem(vmi, newVirtiofsFilesystem(dataVolumeName))
addVolume(vmi, newDataVolume(dataVolumeName, dataVolumeName, false))
}
}
func WithPersistentVolumeClaimLun(diskName, pvcName string, reservation bool) Option {
return func(vmi *v1.VirtualMachineInstance) {
addDisk(vmi, newLun(diskName, reservation))
addVolume(vmi, newPersistentVolumeClaimVolume(diskName, pvcName, false))
}
}
func WithHostDisk(diskName, path string, diskType v1.HostDiskType, opts ...HostDiskOption) Option {
var capacity string
if diskType == v1.HostDiskExistsOrCreate {
capacity = defaultDiskSize
}
return WithHostDiskAndCapacity(diskName, path, diskType, capacity, opts...)
}
func WithHostDiskAndCapacity(diskName, path string, diskType v1.HostDiskType, capacity string, opts ...HostDiskOption) Option {
return func(vmi *v1.VirtualMachineInstance) {
addDisk(vmi, newDisk(diskName, v1.DiskBusVirtio))
addVolume(vmi, newHostDisk(diskName, path, diskType, capacity, opts...))
}
}
type HostDiskOption func(v *v1.Volume)
func WithSharedHostDisk(shared bool) HostDiskOption {
return func(v *v1.Volume) {
v.HostDisk.Shared = pointer.P(shared)
}
}
// WithIOThreadsPolicy sets the IOThreadsPolicy field
func WithIOThreadsPolicy(policy v1.IOThreadsPolicy) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Domain.IOThreadsPolicy = &policy
}
}
func WithIOThreads(iothreads v1.DiskIOThreads) Option {
return func(vmi *v1.VirtualMachineInstance) {
vmi.Spec.Domain.IOThreads = pointer.P(iothreads)
}
}
func addDisk(vmi *v1.VirtualMachineInstance, disk v1.Disk) {
if !diskExists(vmi, disk) {
vmi.Spec.Domain.Devices.Disks = append(vmi.Spec.Domain.Devices.Disks, disk)
}
}
func addVolume(vmi *v1.VirtualMachineInstance, volume v1.Volume) {
if !volumeExists(vmi, volume) {
vmi.Spec.Volumes = append(vmi.Spec.Volumes, volume)
}
}
func addFilesystem(vmi *v1.VirtualMachineInstance, filesystem v1.Filesystem) {
if filesystemExists(vmi, filesystem) {
panic(fmt.Errorf("filesystem %s already exists", filesystem.Name))
}
vmi.Spec.Domain.Devices.Filesystems = append(vmi.Spec.Domain.Devices.Filesystems, filesystem)
}
func getVolume(vmi *v1.VirtualMachineInstance, name string) *v1.Volume {
for i := range vmi.Spec.Volumes {
if vmi.Spec.Volumes[i].Name == name {
return &vmi.Spec.Volumes[i]
}
}
return nil
}
func diskExists(vmi *v1.VirtualMachineInstance, disk v1.Disk) bool {
for _, d := range vmi.Spec.Domain.Devices.Disks {
if d.Name == disk.Name {
return true
}
}
return false
}
func volumeExists(vmi *v1.VirtualMachineInstance, volume v1.Volume) bool {
for _, v := range vmi.Spec.Volumes {
if v.Name == volume.Name {
return true
}
}
return false
}
func filesystemExists(vmi *v1.VirtualMachineInstance, filesystem v1.Filesystem) bool {
for _, f := range vmi.Spec.Domain.Devices.Filesystems {
if f.Name == filesystem.Name {
return true
}
}
return false
}
type DiskOption func(vm *v1.Disk)
func newDisk(name string, bus v1.DiskBus, opts ...DiskOption) v1.Disk {
d := v1.Disk{
Name: name,
DiskDevice: v1.DiskDevice{
Disk: &v1.DiskTarget{
Bus: bus,
},
},
}
for _, f := range opts {
f(&d)
}
return d
}
func WithDedicatedIOThreads(enabled bool) DiskOption {
return func(d *v1.Disk) {
d.DedicatedIOThread = pointer.P(enabled)
}
}
func newCDRom(name string, bus v1.DiskBus) v1.Disk {
return v1.Disk{
Name: name,
DiskDevice: v1.DiskDevice{
CDRom: &v1.CDRomTarget{
Bus: bus,
},
},
}
}
func newVirtiofsFilesystem(name string) v1.Filesystem {
return v1.Filesystem{
Name: name,
Virtiofs: &v1.FilesystemVirtiofs{},
}
}
func newLun(name string, reservation bool) v1.Disk {
return v1.Disk{
Name: name,
DiskDevice: v1.DiskDevice{
LUN: &v1.LunTarget{
Bus: v1.DiskBusSCSI,
Reservation: reservation,
},
},
}
}
func newVolume(name string) v1.Volume {
return v1.Volume{Name: name}
}
func newContainerVolume(name, image string, imagePullPolicy k8sv1.PullPolicy) v1.Volume {
container := v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
ContainerDisk: &v1.ContainerDiskSource{
Image: image,
},
},
}
if imagePullPolicy != "" {
container.ContainerDisk.ImagePullPolicy = imagePullPolicy
}
return container
}
func newPersistentVolumeClaimVolume(name, claimName string, hotpluggable bool) v1.Volume {
return v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
PersistentVolumeClaimVolumeSource: k8sv1.PersistentVolumeClaimVolumeSource{
ClaimName: claimName,
},
Hotpluggable: hotpluggable,
},
},
}
}
func newEphemeralPersistentVolumeClaimVolume(name, claimName string) v1.Volume {
return v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{
PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{
ClaimName: claimName,
},
},
},
}
}
func newDataVolume(name, dataVolumeName string, hotpluggable bool) v1.Volume {
return v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
DataVolume: &v1.DataVolumeSource{
Name: dataVolumeName,
Hotpluggable: hotpluggable,
},
},
}
}
func newEmptyDisk(name string, capacity resource.Quantity) v1.Volume {
return v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
EmptyDisk: &v1.EmptyDiskSource{
Capacity: capacity,
},
},
}
}
func newHostDisk(name, path string, diskType v1.HostDiskType, capacity string, opts ...HostDiskOption) v1.Volume {
hostDisk := v1.HostDisk{
Path: path,
Type: diskType,
}
// Set capacity if provided
if capacity != "" {
hostDisk.Capacity = resource.MustParse(capacity)
}
v := v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
HostDisk: &hostDisk,
},
}
for _, f := range opts {
f(&v)
}
return v
}
func WithSupplementalPoolThreadCount(count uint32) Option {
return func(vmi *v1.VirtualMachineInstance) {
if vmi.Spec.Domain.IOThreads == nil {
vmi.Spec.Domain.IOThreads = &v1.DiskIOThreads{}
}
vmi.Spec.Domain.IOThreads.SupplementalPoolThreadCount = pointer.P(count)
}
}
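// Illustrative sketch, not part of the original source: a bootable container
// disk plus an empty scratch disk and a PVC-backed data disk. The image
// reference, disk names, and PVC name are hypothetical.
func exampleVMIWithDisks() *v1.VirtualMachineInstance {
	return New(
		WithContainerDisk("boot", "registry.example.org/vm-disks/fedora:latest"),
		WithEmptyDisk("scratch", v1.DiskBusVirtio, resource.MustParse("2Gi")),
		WithPersistentVolumeClaim("data", "my-pvc"),
	)
}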
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package libvmi
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "kubevirt.io/api/core/v1"
instancetypeapi "kubevirt.io/api/instancetype"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
"kubevirt.io/kubevirt/pkg/pointer"
)
type VMOption func(vm *v1.VirtualMachine)
func NewVirtualMachine(vmi *v1.VirtualMachineInstance, opts ...VMOption) *v1.VirtualMachine {
vm := &v1.VirtualMachine{
TypeMeta: metav1.TypeMeta{
APIVersion: v1.GroupVersion.String(),
Kind: "VirtualMachine",
},
ObjectMeta: metav1.ObjectMeta{
Name: vmi.Name,
Namespace: vmi.Namespace,
},
Spec: v1.VirtualMachineSpec{
RunStrategy: pointer.P(v1.RunStrategyHalted),
Template: &v1.VirtualMachineInstanceTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: vmi.ObjectMeta.Annotations,
Labels: vmi.ObjectMeta.Labels,
},
Spec: vmi.Spec,
},
},
}
for _, f := range opts {
f(vm)
}
return vm
}
func WithAnnotations(annotations map[string]string) VMOption {
	return func(vm *v1.VirtualMachine) {
		if vm.Annotations == nil {
			// Start from a fresh map instead of aliasing the caller's map.
			vm.Annotations = map[string]string{}
		}
		for key, val := range annotations {
			vm.Annotations[key] = val
		}
	}
}
func WithLabels(labels map[string]string) VMOption {
	return func(vm *v1.VirtualMachine) {
		if vm.Labels == nil {
			// Start from a fresh map instead of aliasing the caller's map.
			vm.Labels = map[string]string{}
		}
		for key, val := range labels {
			vm.Labels[key] = val
		}
	}
}
func WithRunStrategy(strategy v1.VirtualMachineRunStrategy) VMOption {
return func(vm *v1.VirtualMachine) {
vm.Spec.RunStrategy = &strategy
}
}
func WithDataVolumeTemplate(datavolume *cdiv1.DataVolume) VMOption {
return func(vm *v1.VirtualMachine) {
vm.Spec.DataVolumeTemplates = append(vm.Spec.DataVolumeTemplates,
v1.DataVolumeTemplateSpec{
ObjectMeta: datavolume.ObjectMeta,
Spec: datavolume.Spec,
},
)
}
}
func resourcesRemovedFromVMI(vmiSpec *v1.VirtualMachineInstanceSpec) {
vmiSpec.Domain.CPU = nil
vmiSpec.Domain.Memory = nil
vmiSpec.Domain.Resources = v1.ResourceRequirements{}
}
func preferencesRemovedFromVMI(vmiSpec *v1.VirtualMachineInstanceSpec) {
vmiSpec.TerminationGracePeriodSeconds = nil
vmiSpec.Domain.Features = nil
vmiSpec.Domain.Machine = nil
for diskIndex := range vmiSpec.Domain.Devices.Disks {
disk := vmiSpec.Domain.Devices.Disks[diskIndex].DiskDevice.Disk
if disk != nil && disk.Bus != "" {
disk.Bus = ""
}
}
}
func WithClusterInstancetype(name string) VMOption {
return func(vm *v1.VirtualMachine) {
resourcesRemovedFromVMI(&vm.Spec.Template.Spec)
vm.Spec.Instancetype = &v1.InstancetypeMatcher{
Name: name,
}
}
}
func WithClusterPreference(name string) VMOption {
return func(vm *v1.VirtualMachine) {
preferencesRemovedFromVMI(&vm.Spec.Template.Spec)
vm.Spec.Preference = &v1.PreferenceMatcher{
Name: name,
}
}
}
func WithInstancetype(name string) VMOption {
return func(vm *v1.VirtualMachine) {
resourcesRemovedFromVMI(&vm.Spec.Template.Spec)
vm.Spec.Instancetype = &v1.InstancetypeMatcher{
Name: name,
Kind: instancetypeapi.SingularResourceName,
}
}
}
func WithPreference(name string) VMOption {
return func(vm *v1.VirtualMachine) {
preferencesRemovedFromVMI(&vm.Spec.Template.Spec)
vm.Spec.Preference = &v1.PreferenceMatcher{
Name: name,
Kind: instancetypeapi.SingularPreferenceResourceName,
}
}
}
func WithInstancetypeInferredFromVolume(name string) VMOption {
return func(vm *v1.VirtualMachine) {
resourcesRemovedFromVMI(&vm.Spec.Template.Spec)
vm.Spec.Instancetype = &v1.InstancetypeMatcher{
InferFromVolume: name,
}
}
}
func WithPreferenceInferredFromVolume(name string) VMOption {
return func(vm *v1.VirtualMachine) {
preferencesRemovedFromVMI(&vm.Spec.Template.Spec)
vm.Spec.Preference = &v1.PreferenceMatcher{
InferFromVolume: name,
}
}
}
func WithInstancetypeRevision(revisionName string) VMOption {
return func(vm *v1.VirtualMachine) {
resourcesRemovedFromVMI(&vm.Spec.Template.Spec)
vm.Spec.Instancetype = &v1.InstancetypeMatcher{
Name: "unused",
RevisionName: revisionName,
}
}
}
func WithPreferenceRevision(revisionName string) VMOption {
return func(vm *v1.VirtualMachine) {
preferencesRemovedFromVMI(&vm.Spec.Template.Spec)
vm.Spec.Preference = &v1.PreferenceMatcher{
Name: "unused",
RevisionName: revisionName,
}
}
}
func WithUpdateVolumeStrategy(strategy v1.UpdateVolumesStrategy) VMOption {
return func(vm *v1.VirtualMachine) {
vm.Spec.UpdateVolumesStrategy = pointer.P(strategy)
}
}
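// Illustrative sketch, not part of the original source: wrapping a VMI spec in
// a VirtualMachine that starts immediately and delegates resources to a
// cluster instancetype. The instancetype name is hypothetical.
func exampleVM() *v1.VirtualMachine {
	vmi := New(WithMemoryRequest("128Mi"))
	return NewVirtualMachine(vmi,
		WithRunStrategy(v1.RunStrategyAlways),
		WithClusterInstancetype("u1.small"), // strips CPU/memory from the template
	)
}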
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package libvmi
import (
k8smetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
v1 "kubevirt.io/api/core/v1"
)
// Option represents an action that enables an option.
type Option func(vmi *v1.VirtualMachineInstance)
// New instantiates a new VMI configuration,
// building its properties based on the specified With* options.
func New(opts ...Option) *v1.VirtualMachineInstance {
vmi := baseVmi(randName())
for _, f := range opts {
f(vmi)
}
return vmi
}
var defaultOptions []Option
func RegisterDefaultOption(opt Option) {
defaultOptions = append(defaultOptions, opt)
}
// randName returns a random name for a virtual machine
func randName() string {
const randomPostfixLen = 5
return "testvmi" + "-" + rand.String(randomPostfixLen)
}
func baseVmi(name string) *v1.VirtualMachineInstance {
vmi := &v1.VirtualMachineInstance{
ObjectMeta: k8smetav1.ObjectMeta{
Name: name,
},
TypeMeta: k8smetav1.TypeMeta{
APIVersion: v1.GroupVersion.String(),
Kind: "VirtualMachineInstance",
},
Spec: v1.VirtualMachineInstanceSpec{Domain: v1.DomainSpec{}},
}
for _, opt := range defaultOptions {
opt(vmi)
}
return vmi
}
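// Illustrative sketch, not part of the original source: RegisterDefaultOption
// lets another package inject an Option into every VMI built by New,
// typically from an init function. The option chosen here is hypothetical.
func exampleRegisterDefaults() {
	RegisterDefaultOption(WithTerminationGracePeriod(0))
}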
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package memory
import (
"fmt"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
"kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/converter/vcpu"
"k8s.io/apimachinery/pkg/api/resource"
)
const (
// must be a power of 2 and at least equal
// to the size of a transparent hugepage (2MiB on x86_64).
// The value recommended by QEMU is 2MiB.
HotplugBlockAlignmentBytes int64 = 0x200000
// 1GiB, the size of 1Gi HugePages
Hotplug1GHugePagesBlockAlignmentBytes int64 = 0x40000000
// requiredMinGuestMemory is the minimum required memory
// for a VM to have memory hotplug enabled.
//
// The 1GiB mark is chosen as a tradeoff: it is enough
// memory for the guest kernel to allocate its internal data
// structures and the swiotlb, which is usually
// 64MB. It also means all PCI devices can be memory mapped,
// as they are mapped within the first 1GiB (the PCI hole).
requiredMinGuestMemory = 0x40000000
)
func ValidateLiveUpdateMemory(vmSpec *v1.VirtualMachineInstanceSpec, maxGuest *resource.Quantity) error {
domain := &vmSpec.Domain
if domain.CPU != nil && domain.CPU.Realtime != nil {
return fmt.Errorf("Memory hotplug is not compatible with realtime VMs")
}
if domain.CPU != nil &&
domain.CPU.NUMA != nil &&
domain.CPU.NUMA.GuestMappingPassthrough != nil {
return fmt.Errorf("Memory hotplug is not compatible with guest mapping passthrough")
}
if domain.LaunchSecurity != nil {
return fmt.Errorf("Memory hotplug is not compatible with encrypted VMs")
}
blockAlignment := HotplugBlockAlignmentBytes
if domain.Memory != nil &&
domain.Memory.Hugepages != nil &&
domain.Memory.Hugepages.PageSize == "1Gi" {
blockAlignment = Hotplug1GHugePagesBlockAlignmentBytes
}
if domain.Memory == nil ||
domain.Memory.Guest == nil {
return fmt.Errorf("Guest memory must be configured when memory hotplug is enabled")
}
if maxGuest == nil {
return fmt.Errorf("Max guest memory must be configured when memory hotplug is enabled")
}
if domain.Memory.Guest.Cmp(*maxGuest) > 0 {
return fmt.Errorf("Guest memory is greater than the configured maxGuest memory")
}
if domain.Memory.Guest.Value()%blockAlignment != 0 {
alignment := resource.NewQuantity(blockAlignment, resource.BinarySI)
return fmt.Errorf("Guest memory must be %s aligned", alignment)
}
if maxGuest.Value()%blockAlignment != 0 {
alignment := resource.NewQuantity(blockAlignment, resource.BinarySI)
return fmt.Errorf("MaxGuest must be %s aligned", alignment)
}
if vmSpec.Architecture != "amd64" {
return fmt.Errorf("Memory hotplug is only available for x86_64 VMs")
}
if domain.Memory.Guest.Value() < requiredMinGuestMemory {
return fmt.Errorf("Memory hotplug is only available for VMs with at least 1Gi of guest memory")
}
return nil
}
func BuildMemoryDevice(vmi *v1.VirtualMachineInstance) (*api.MemoryDevice, error) {
domain := vmi.Spec.Domain
pluggableMemory := domain.Memory.MaxGuest.DeepCopy()
pluggableMemory.Sub(*vmi.Status.Memory.GuestAtBoot)
pluggableMemorySize, err := vcpu.QuantityToByte(pluggableMemory)
if err != nil {
return nil, err
}
requestedHotPlugMemory := domain.Memory.Guest.DeepCopy()
requestedHotPlugMemory.Sub(*vmi.Status.Memory.GuestAtBoot)
pluggableMemoryRequested, err := vcpu.QuantityToByte(requestedHotPlugMemory)
if err != nil {
return nil, err
}
blockAlignment := HotplugBlockAlignmentBytes
if domain.Memory != nil &&
domain.Memory.Hugepages != nil &&
domain.Memory.Hugepages.PageSize == "1Gi" {
blockAlignment = Hotplug1GHugePagesBlockAlignmentBytes
}
return &api.MemoryDevice{
Model: "virtio-mem",
Target: &api.MemoryTarget{
Size: pluggableMemorySize,
Node: "0",
Block: api.Memory{Unit: "b", Value: uint64(blockAlignment)},
Requested: pluggableMemoryRequested,
},
}, nil
}
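// Illustrative sketch, not part of the original source: a spec that passes
// ValidateLiveUpdateMemory. The quantities are hypothetical but satisfy the
// amd64, 1Gi-minimum, and 2MiB-alignment checks above.
func exampleValidateHotplugMemory() error {
	guest := resource.MustParse("2Gi")
	maxGuest := resource.MustParse("8Gi")
	spec := &v1.VirtualMachineInstanceSpec{
		Architecture: "amd64",
		Domain: v1.DomainSpec{
			Memory: &v1.Memory{Guest: &guest},
		},
	}
	return ValidateLiveUpdateMemory(spec, &maxGuest) // returns nil
}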
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*/
package client
import (
"context"
"net/url"
"time"
"github.com/rhobs/operator-observability-toolkit/pkg/operatormetrics"
)
type latencyAdapter struct {
m *operatormetrics.HistogramVec
}
func (l *latencyAdapter) Observe(_ context.Context, verb string, u url.URL, latency time.Duration) {
l.m.WithLabelValues(getVerbFromHTTPVerb(u, verb), u.String()).Observe(latency.Seconds())
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*/
package client
import (
"fmt"
"net/http"
"regexp"
"github.com/rhobs/operator-observability-toolkit/pkg/operatormetrics"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/metrics"
"kubevirt.io/client-go/kubecli"
)
var resourceParsingRegexs []*regexp.Regexp
func init() {
metrics.Register(metrics.RegisterOpts{
RequestLatency: &latencyAdapter{requestLatency},
RateLimiterLatency: &latencyAdapter{rateLimiterLatency},
})
}
// RegisterRestConfigHooks adds hooks to the KubeVirt client and should be executed before building its config
func RegisterRestConfigHooks() {
setupResourcesToWatch()
kubecli.RegisterRestConfigHook(addHTTPRoundTripClientMonitoring)
}
func SetupMetrics() error {
return operatormetrics.RegisterMetrics(
restMetrics,
)
}
func setupResourcesToWatch() {
resPat := `[A-Za-z0-9.\-]*`
// watch core k8s apis
resourceParsingRegexs = append(resourceParsingRegexs, regexp.MustCompile(fmt.Sprintf(`/api/%s/watch/namespaces/%s/(?P<resource>%s)`, resPat, resPat, resPat)))
resourceParsingRegexs = append(resourceParsingRegexs, regexp.MustCompile(fmt.Sprintf(`/api/%s/watch/(?P<resource>%s)`, resPat, resPat)))
// watch custom resource apis
resourceParsingRegexs = append(resourceParsingRegexs, regexp.MustCompile(fmt.Sprintf(`/apis/%s/%s/watch/namespaces/%s/(?P<resource>%s)`, resPat, resPat, resPat, resPat)))
resourceParsingRegexs = append(resourceParsingRegexs, regexp.MustCompile(fmt.Sprintf(`/apis/%s/%s/watch/(?P<resource>%s)`, resPat, resPat, resPat)))
// namespaced core k8s apis and namespaced custom apis
resourceParsingRegexs = append(resourceParsingRegexs, regexp.MustCompile(fmt.Sprintf(`/api/%s/namespaces/%s/(?P<resource>%s)`, resPat, resPat, resPat)))
resourceParsingRegexs = append(resourceParsingRegexs, regexp.MustCompile(fmt.Sprintf(`/apis/%s/%s/namespaces/%s/(?P<resource>%s)`, resPat, resPat, resPat, resPat)))
// globally scoped core k8s apis and globally scoped custom apis
resourceParsingRegexs = append(resourceParsingRegexs, regexp.MustCompile(fmt.Sprintf(`/api/%s/(?P<resource>%s)`, resPat, resPat)))
resourceParsingRegexs = append(resourceParsingRegexs, regexp.MustCompile(fmt.Sprintf(`/apis/%s/%s/(?P<resource>%s)`, resPat, resPat, resPat)))
}
func addHTTPRoundTripClientMonitoring(config *rest.Config) {
fn := func(rt http.RoundTripper) http.RoundTripper {
return &rtWrapper{
origRoundTripper: rt,
}
}
config.Wrap(fn)
}
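// Illustrative sketch, not part of the original source: the intended call
// order during process startup, assuming the hooks must be registered before
// any client config is built (see RegisterRestConfigHooks above).
func exampleMonitoringSetup() error {
	RegisterRestConfigHooks()
	return SetupMetrics()
}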
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*/
package client
import (
"net/http"
"net/url"
"strconv"
"strings"
)
const defaultNone = "none"
type rtWrapper struct {
origRoundTripper http.RoundTripper
}
func (r *rtWrapper) RoundTrip(request *http.Request) (response *http.Response, err error) {
var (
status string
host string
)
response, err = r.origRoundTripper.RoundTrip(request)
if err != nil {
status = "<error>"
} else {
status = strconv.Itoa(response.StatusCode)
}
resource, verb := parseURLResourceOperation(request)
if request.URL != nil {
host = request.URL.Host
} else {
host = defaultNone
}
requestResult.WithLabelValues(status, request.Method, host, resource, verb).Add(1)
return response, err
}
func parseURLResourceOperation(request *http.Request) (resource, verb string) {
method := request.Method
if request.URL == nil || method == "" {
return defaultNone, defaultNone
}
resource = findResource(*request.URL)
if resource == "" {
return defaultNone, defaultNone
}
return resource, getVerbFromHTTPVerb(*request.URL, method)
}
func getVerbFromHTTPVerb(u url.URL, methodOrVerb string) (verb string) {
switch methodOrVerb {
case "GET":
verb = determineGetVerb(u)
case "PUT":
verb = "UPDATE"
case "PATCH":
verb = "PATCH"
case "POST":
verb = "CREATE"
case "DELETE":
verb = "DELETE"
default:
verb = methodOrVerb
}
return verb
}
func determineGetVerb(u url.URL) string {
if strings.Contains(u.Path, "/watch/") || u.Query().Get("watch") == "true" {
return "WATCH"
}
if resource := findResource(u); resource == "" {
return "none"
} else if strings.HasSuffix(u.Path, resource) {
return "LIST"
}
return "GET"
}
func findResource(u url.URL) (resource string) {
for _, r := range resourceParsingRegexs {
if match := r.FindStringSubmatch(u.Path); len(match) > 1 {
return match[1]
}
}
return ""
}
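// Illustrative sketch (not part of the original source): how the helpers above
// classify a request once setupResourcesToWatch has populated
// resourceParsingRegexs:
//
//	u, _ := url.Parse("https://10.0.0.1/api/v1/namespaces/default/pods")
//	req := &http.Request{Method: "GET", URL: u}
//	resource, verb := parseURLResourceOperation(req)
//	// resource == "pods"; the path ends in the resource, so verb == "LIST".
//	// Appending a name ("/pods/mypod") yields "GET", and a "/watch/" path
//	// segment or "?watch=true" yields "WATCH".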
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package labels
import (
"fmt"
"strings"
"sync"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/client-go/tools/cache"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
clientutil "kubevirt.io/client-go/util"
)
var (
defaultAllowlist = []string{"*"}
defaultIgnorelist = []string{}
)
const (
configMapName = "kubevirt-vm-labels-config"
)
type Config interface {
ShouldReport(label string) bool
}
type configImpl struct {
mu sync.RWMutex
allowlist []string
ignorelist []string
allowAll bool
}
// New creates a new labels config instance and starts a watcher that keeps it
// updated from the ConfigMap. A non-nil client is required.
func New(client kubecli.KubevirtClient) (Config, error) {
cfg := &configImpl{
allowlist: append([]string{}, defaultAllowlist...),
ignorelist: append([]string{}, defaultIgnorelist...),
allowAll: true,
}
if err := cfg.startWatcherWithClient(client); err != nil {
return nil, err
}
return cfg, nil
}
func (c *configImpl) startWatcherWithClient(client kubecli.KubevirtClient) error {
if client == nil {
return fmt.Errorf("nil kubevirt client")
}
namespace, err := clientutil.GetNamespace()
if err != nil {
return fmt.Errorf("failed to determine namespace for watcher: %w", err)
}
lw := cache.NewListWatchFromClient(
client.CoreV1().RESTClient(),
"configmaps",
namespace,
fields.OneTermEqualSelector("metadata.name", configMapName),
)
informer := cache.NewSharedIndexInformer(
lw,
&k8sv1.ConfigMap{},
0,
cache.Indexers{},
)
c.attachHandlersToInformer(informer)
// The watcher runs for the lifetime of the process; the stop channel is
// created to satisfy the informer API but is intentionally never closed.
stop := make(chan struct{})
go informer.Run(stop)
return nil
}
func (c *configImpl) attachHandlersToInformer(informer cache.SharedIndexInformer) {
informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
cm, ok := obj.(*k8sv1.ConfigMap)
if !ok {
log.Log.Warningf("vm-labels: Add handler received unexpected object type %T", obj)
return
}
c.updateFromConfigMap(cm)
},
UpdateFunc: func(oldObj, newObj interface{}) {
cm, ok := newObj.(*k8sv1.ConfigMap)
if !ok {
log.Log.Warningf("vm-labels: Update handler received unexpected object type %T", newObj)
return
}
c.updateFromConfigMap(cm)
},
DeleteFunc: func(obj interface{}) {
c.resetToDefaults()
},
})
}
func (c *configImpl) ShouldReport(label string) bool {
c.mu.RLock()
defer c.mu.RUnlock()
if len(c.allowlist) == 0 {
return false
}
for _, ig := range c.ignorelist {
if ig == label {
return false
}
}
if c.allowAll {
return true
}
for _, a := range c.allowlist {
if a == label {
return true
}
}
return false
}
func (c *configImpl) updateFromConfigMap(configMap *k8sv1.ConfigMap) {
if configMap == nil || configMap.Data == nil {
c.resetToDefaults()
return
}
c.mu.Lock()
defer c.mu.Unlock()
if d, ok := configMap.Data["allowlist"]; ok {
c.allowlist = parseLabels(d)
} else {
c.allowlist = append([]string{}, defaultAllowlist...)
}
c.allowAll = false
for _, a := range c.allowlist {
if a == "*" {
c.allowAll = true
break
}
}
if d, ok := configMap.Data["ignorelist"]; ok {
c.ignorelist = parseLabels(d)
} else {
c.ignorelist = append([]string{}, defaultIgnorelist...)
}
}
func (c *configImpl) resetToDefaults() {
c.mu.Lock()
defer c.mu.Unlock()
c.allowlist = append([]string{}, defaultAllowlist...)
c.ignorelist = append([]string{}, defaultIgnorelist...)
c.allowAll = true
}
func parseLabels(data string) []string {
parts := strings.Split(data, ",")
out := make([]string, 0, len(parts))
for _, p := range parts {
s := strings.TrimSpace(p)
if s != "" {
out = append(out, s)
}
}
return out
}
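// Illustrative sketch (not part of the original source): the expected shape of
// the ConfigMap data and the resulting filtering, using hypothetical labels:
//
//	// data:
//	//   allowlist:  "app, tier"
//	//   ignorelist: "example.com/internal"
//	//
//	// parseLabels("app, tier") == []string{"app", "tier"}
//	// ShouldReport("app")                  -> true  (on the allowlist)
//	// ShouldReport("example.com/internal") -> false (the ignorelist wins)
//	// ShouldReport("anything-else")        -> false (allowAll only when "*" is listed)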
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*/
package workqueue
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/rhobs/operator-observability-toolkit/pkg/operatormetrics"
k8sworkqueue "k8s.io/client-go/util/workqueue"
)
var (
workqueueMetrics = []operatormetrics.Metric{
depth,
adds,
latency,
workDuration,
retries,
longestRunningProcessor,
unfinishedWork,
}
depth = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_workqueue_depth",
Help: "Current depth of workqueue",
},
[]string{"name"},
)
adds = operatormetrics.NewCounterVec(
operatormetrics.MetricOpts{
Name: "kubevirt_workqueue_adds_total",
Help: "Total number of adds handled by workqueue",
},
[]string{"name"},
)
latency = operatormetrics.NewHistogramVec(
operatormetrics.MetricOpts{
Name: "kubevirt_workqueue_queue_duration_seconds",
Help: "How long an item stays in workqueue before being requested.",
},
prometheus.HistogramOpts{
Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10),
},
[]string{"name"},
)
workDuration = operatormetrics.NewHistogramVec(
operatormetrics.MetricOpts{
Name: "kubevirt_workqueue_work_duration_seconds",
Help: "How long in seconds processing an item from workqueue takes.",
},
prometheus.HistogramOpts{
Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10),
},
[]string{"name"},
)
retries = operatormetrics.NewCounterVec(
operatormetrics.MetricOpts{
Name: "kubevirt_workqueue_retries_total",
Help: "Total number of retries handled by workqueue",
},
[]string{"name"},
)
longestRunningProcessor = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_workqueue_longest_running_processor_seconds",
Help: "How many seconds has the longest running processor for workqueue been running.",
},
[]string{"name"},
)
unfinishedWork = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_workqueue_unfinished_work_seconds",
Help: "How many seconds of work has done that is in progress and hasn't " +
"been observed by work_duration. Large values indicate stuck " +
"threads. One can deduce the number of stuck threads by observing " +
"the rate at which this increases.",
},
[]string{"name"},
)
)
type Provider struct{}
func init() {
k8sworkqueue.SetProvider(Provider{})
}
func SetupMetrics() error {
return operatormetrics.RegisterMetrics(workqueueMetrics)
}
func NewPrometheusMetricsProvider() Provider {
return Provider{}
}
func (Provider) NewDepthMetric(name string) k8sworkqueue.GaugeMetric {
return depth.WithLabelValues(name)
}
func (Provider) NewAddsMetric(name string) k8sworkqueue.CounterMetric {
return adds.WithLabelValues(name)
}
func (Provider) NewLatencyMetric(name string) k8sworkqueue.HistogramMetric {
return latency.WithLabelValues(name)
}
func (Provider) NewWorkDurationMetric(name string) k8sworkqueue.HistogramMetric {
return workDuration.WithLabelValues(name)
}
func (Provider) NewRetriesMetric(name string) k8sworkqueue.CounterMetric {
return retries.WithLabelValues(name)
}
func (Provider) NewLongestRunningProcessorSecondsMetric(name string) k8sworkqueue.SettableGaugeMetric {
return longestRunningProcessor.WithLabelValues(name)
}
func (Provider) NewUnfinishedWorkSecondsMetric(name string) k8sworkqueue.SettableGaugeMetric {
return unfinishedWork.WithLabelValues(name)
}
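// Illustrative sketch (not part of the original source): because init above
// registers this Provider via k8sworkqueue.SetProvider at import time, any
// named workqueue created afterwards reports through these metrics:
//
//	queue := k8sworkqueue.NewNamedRateLimitingQueue(
//		k8sworkqueue.DefaultControllerRateLimiter(), "virt-controller-vm")
//	queue.Add("default/my-vm")
//	// kubevirt_workqueue_adds_total{name="virt-controller-vm"} increments,
//	// and kubevirt_workqueue_depth{name="virt-controller-vm"} is now 1.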
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*/
package virt_api
import (
"time"
"github.com/rhobs/operator-observability-toolkit/pkg/operatormetrics"
)
var (
connectionMetrics = []operatormetrics.Metric{
activePortForwardTunnels,
activeVNCConnections,
activeConsoleConnections,
activeUSBRedirConnections,
vmiLastConnectionTimestamp,
}
namespaceAndVMILabels = []string{"namespace", "vmi"}
activePortForwardTunnels = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_portforward_active_tunnels",
Help: "Amount of active portforward tunnels, broken down by namespace and vmi name.",
},
namespaceAndVMILabels,
)
activeVNCConnections = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vnc_active_connections",
Help: "Amount of active VNC connections, broken down by namespace and vmi name.",
},
namespaceAndVMILabels,
)
activeConsoleConnections = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_console_active_connections",
Help: "Amount of active Console connections, broken down by namespace and vmi name.",
},
namespaceAndVMILabels,
)
activeUSBRedirConnections = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_usbredir_active_connections",
Help: "Amount of active USB redirection connections, broken down by namespace and vmi name.",
},
namespaceAndVMILabels,
)
vmiLastConnectionTimestamp = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vmi_last_api_connection_timestamp_seconds",
Help: "Virtual Machine Instance last API connection timestamp. Including VNC, console, portforward, SSH and usbredir connections.",
},
namespaceAndVMILabels,
)
)
type Decrementer interface {
Dec()
}
// NewActivePortForwardTunnel increments the metric for active portforward tunnels by one for namespace and name
// and returns a recorder for decrementing it once the tunnel is closed
func NewActivePortForwardTunnel(namespace, name string) Decrementer {
recorder := activePortForwardTunnels.WithLabelValues(namespace, name)
recorder.Inc()
return recorder
}
// NewActiveVNCConnection increments the metric for active VNC connections by one for namespace and name
// and returns a recorder for decrementing it once the connection is closed
func NewActiveVNCConnection(namespace, name string) Decrementer {
recorder := activeVNCConnections.WithLabelValues(namespace, name)
recorder.Inc()
return recorder
}
// NewActiveConsoleConnection increments the metric for active console sessions by one for namespace and name
// and returns a recorder for decrementing it once the connection is closed
func NewActiveConsoleConnection(namespace, name string) Decrementer {
recorder := activeConsoleConnections.WithLabelValues(namespace, name)
recorder.Inc()
return recorder
}
// NewActiveUSBRedirConnection increments the metric for active USB redirection connections by one for namespace
// and name and returns a recorder for decrementing it once the connection is closed
func NewActiveUSBRedirConnection(namespace, name string) Decrementer {
recorder := activeUSBRedirConnections.WithLabelValues(namespace, name)
recorder.Inc()
return recorder
}
func SetVMILastConnectionTimestamp(namespace, name string) {
vmiLastConnectionTimestamp.WithLabelValues(namespace, name).Set(float64(time.Now().Unix()))
}
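// Illustrative sketch (not part of the original source): the intended
// lifecycle of a Decrementer, tied to the connection it tracks:
//
//	recorder := NewActiveVNCConnection("default", "my-vmi")
//	defer recorder.Dec() // the gauge drops back once the stream closes
//	SetVMILastConnectionTimestamp("default", "my-vmi")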
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package virt_api
import (
"github.com/rhobs/operator-observability-toolkit/pkg/operatormetrics"
"kubevirt.io/kubevirt/pkg/monitoring/metrics/common/client"
"kubevirt.io/kubevirt/pkg/monitoring/metrics/common/workqueue"
)
func SetupMetrics() error {
if err := client.SetupMetrics(); err != nil {
return err
}
if err := workqueue.SetupMetrics(); err != nil {
return err
}
return operatormetrics.RegisterMetrics(
connectionMetrics,
vmMetrics,
)
}
func ListMetrics() []operatormetrics.Metric {
return operatormetrics.ListMetrics()
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package virt_api
import (
"github.com/rhobs/operator-observability-toolkit/pkg/operatormetrics"
v1 "kubevirt.io/api/core/v1"
)
var (
vmMetrics = []operatormetrics.Metric{
vmsCreatedCounter,
}
vmsCreatedCounter = operatormetrics.NewCounterVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vm_created_by_pod_total",
Help: "The total number of VMs created by namespace and virt-api pod, since install.",
},
[]string{"namespace"},
)
)
func NewVMCreated(vm *v1.VirtualMachine) {
vmsCreatedCounter.WithLabelValues(vm.Namespace).Inc()
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package virt_controller
import (
ioprometheusclient "github.com/prometheus/client_model/go"
"github.com/rhobs/operator-observability-toolkit/pkg/operatormetrics"
)
var (
componentMetrics = []operatormetrics.Metric{
virtControllerLeading,
virtControllerReady,
}
virtControllerLeading = operatormetrics.NewGauge(
operatormetrics.MetricOpts{
Name: "kubevirt_virt_controller_leading_status",
Help: "Indication for an operating virt-controller.",
},
)
virtControllerReady = operatormetrics.NewGauge(
operatormetrics.MetricOpts{
Name: "kubevirt_virt_controller_ready_status",
Help: "Indication for a virt-controller that is ready to take the lead.",
},
)
)
func GetVirtControllerMetric() (*ioprometheusclient.Metric, error) {
dto := &ioprometheusclient.Metric{}
err := virtControllerLeading.Write(dto)
return dto, err
}
func SetVirtControllerLeading() {
virtControllerLeading.Set(1)
}
func SetVirtControllerReady() {
virtControllerReady.Set(1)
}
func SetVirtControllerNotReady() {
virtControllerReady.Set(0)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*/
package virt_controller
import (
ioprometheusclient "github.com/prometheus/client_model/go"
"github.com/rhobs/operator-observability-toolkit/pkg/operatormetrics"
)
var (
leaderMetrics = []operatormetrics.Metric{
outdatedVirtualMachineInstanceWorkloads,
}
outdatedVirtualMachineInstanceWorkloads = operatormetrics.NewGauge(
operatormetrics.MetricOpts{
Name: "kubevirt_vmi_number_of_outdated",
Help: "Indication for the total number of VirtualMachineInstance workloads that are not running within the most up-to-date version of the virt-launcher environment.",
},
)
)
func SetOutdatedVirtualMachineInstanceWorkloads(value int) {
outdatedVirtualMachineInstanceWorkloads.Set(float64(value))
}
func GetOutdatedVirtualMachineInstanceWorkloads() (int, error) {
dto := &ioprometheusclient.Metric{}
if err := outdatedVirtualMachineInstanceWorkloads.Write(dto); err != nil {
return 0, err
}
return int(dto.GetGauge().GetValue()), nil
}
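// Illustrative sketch (not part of the original source): the setter and getter
// above round-trip through the underlying Prometheus gauge:
//
//	SetOutdatedVirtualMachineInstanceWorkloads(3)
//	n, err := GetOutdatedVirtualMachineInstanceWorkloads()
//	// err == nil && n == 3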
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package virt_controller
import (
"fmt"
"time"
"github.com/rhobs/operator-observability-toolkit/pkg/operatormetrics"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/kubevirt/pkg/instancetype/apply"
"kubevirt.io/kubevirt/pkg/instancetype/find"
preferencefind "kubevirt.io/kubevirt/pkg/instancetype/preference/find"
"kubevirt.io/kubevirt/pkg/monitoring/metrics/common/client"
"kubevirt.io/kubevirt/pkg/monitoring/metrics/common/workqueue"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
type vmApplyHandler interface {
ApplyToVM(vm *virtv1.VirtualMachine) error
}
type Indexers struct {
VMIMigration cache.Indexer
KVPod cache.Indexer
}
type Stores struct {
VM cache.Store
VMI cache.Store
PersistentVolumeClaim cache.Store
Instancetype cache.Store
ClusterInstancetype cache.Store
Preference cache.Store
ClusterPreference cache.Store
ControllerRevision cache.Store
}
var (
metrics = [][]operatormetrics.Metric{
componentMetrics,
migrationMetrics,
perfscaleMetrics,
vmSnapshotMetrics,
}
indexers *Indexers
stores *Stores
clusterConfig *virtconfig.ClusterConfig
vmApplier vmApplyHandler
kubevirtClient kubecli.KubevirtClient
)
func SetupMetrics(
metricsIndexers *Indexers,
metricsStores *Stores,
virtClusterConfig *virtconfig.ClusterConfig,
clientset kubecli.KubevirtClient,
) error {
if metricsIndexers == nil {
metricsIndexers = &Indexers{}
}
indexers = metricsIndexers
if metricsStores == nil {
metricsStores = &Stores{}
}
stores = metricsStores
clusterConfig = virtClusterConfig
kubevirtClient = clientset
vmApplier = apply.NewVMApplier(
find.NewSpecFinder(
stores.Instancetype,
stores.ClusterInstancetype,
stores.ControllerRevision,
clientset,
),
preferencefind.NewSpecFinder(
stores.Preference,
stores.ClusterPreference,
stores.ControllerRevision,
clientset,
),
)
if err := client.SetupMetrics(); err != nil {
return err
}
if err := workqueue.SetupMetrics(); err != nil {
return err
}
if err := operatormetrics.RegisterMetrics(metrics...); err != nil {
return err
}
return operatormetrics.RegisterCollector(
migrationStatsCollector,
vmiStatsCollector,
vmStatsCollector,
)
}
func RegisterLeaderMetrics() error {
return operatormetrics.RegisterMetrics(leaderMetrics)
}
func UpdateVMIMigrationInformer(indexer cache.Indexer) {
if indexers == nil {
indexers = &Indexers{}
}
indexers.VMIMigration = indexer
}
func ListMetrics() []operatormetrics.Metric {
return operatormetrics.ListMetrics()
}
func PhaseTransitionTimeBuckets() []float64 {
return []float64{
0.5 * time.Second.Seconds(),
1 * time.Second.Seconds(),
2 * time.Second.Seconds(),
5 * time.Second.Seconds(),
10 * time.Second.Seconds(),
20 * time.Second.Seconds(),
30 * time.Second.Seconds(),
40 * time.Second.Seconds(),
50 * time.Second.Seconds(),
60 * time.Second.Seconds(),
90 * time.Second.Seconds(),
2 * time.Minute.Seconds(),
3 * time.Minute.Seconds(),
5 * time.Minute.Seconds(),
10 * time.Minute.Seconds(),
20 * time.Minute.Seconds(),
30 * time.Minute.Seconds(),
1 * time.Hour.Seconds(),
}
}
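// Illustrative note (not part of the original source): the time helpers above
// are used purely for readability; the resulting histogram boundaries are
// plain seconds ranging from 0.5 to 3600:
//
//	PhaseTransitionTimeBuckets()[0]  // 0.5
//	PhaseTransitionTimeBuckets()[11] // 120 (2 minutes)
//	PhaseTransitionTimeBuckets()[17] // 3600 (1 hour)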
func getTransitionTimeSeconds(oldTime *metav1.Time, newTime *metav1.Time) (float64, error) {
if newTime == nil || oldTime == nil {
// no phase transition timestamp found
return 0.0, fmt.Errorf("missing phase transition timestamp, newTime: %v, oldTime: %v", newTime, oldTime)
}
diffSeconds := newTime.Time.Sub(oldTime.Time).Seconds()
// When transitions are very fast we can encounter clock skew, so clamp negative values to 0
if diffSeconds < 0 {
diffSeconds = 0.0
}
return diffSeconds, nil
}
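// Illustrative sketch (not part of the original source): the clamping above,
// using two metav1 timestamps one second apart, in order and skewed:
//
//	earlier := metav1.Now()
//	later := metav1.NewTime(earlier.Add(time.Second))
//	d, _ := getTransitionTimeSeconds(&earlier, &later) // d == 1.0
//	d, _ = getTransitionTimeSeconds(&later, &earlier)  // skewed: clamped to 0.0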
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*/
package virt_controller
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/rhobs/operator-observability-toolkit/pkg/operatormetrics"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
)
const (
migrationTransTimeErrFmt = "Error encountered during VMI migration transition time histogram calculation: %v"
migrationTransTimeFail = "Failed to get a histogram for a VMI migration lifecycle transition times"
)
var (
migrationMetrics = []operatormetrics.Metric{
vmiMigrationPhaseTransitionTimeFromCreation,
}
vmiMigrationPhaseTransitionTimeFromCreation = operatormetrics.NewHistogramVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vmi_migration_phase_transition_time_from_creation_seconds",
Help: "Histogram of VM migration phase transitions duration from creation time in seconds.",
},
prometheus.HistogramOpts{
Buckets: PhaseTransitionTimeBuckets(),
},
[]string{
// phase of the vmi migration
"phase",
},
)
)
func CreateVMIMigrationHandler(informer cache.SharedIndexInformer) error {
_, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(oldVMIMigration, newVMIMigration interface{}) {
updateVMIMigrationPhaseTransitionTimeFromCreationTime(oldVMIMigration.(*v1.VirtualMachineInstanceMigration), newVMIMigration.(*v1.VirtualMachineInstanceMigration))
},
})
return err
}
func updateVMIMigrationPhaseTransitionTimeFromCreationTime(oldVMIMigration *v1.VirtualMachineInstanceMigration, newVMIMigration *v1.VirtualMachineInstanceMigration) {
if oldVMIMigration == nil || oldVMIMigration.Status.Phase == newVMIMigration.Status.Phase {
return
}
diffSeconds, err := getVMIMigrationTransitionTimeSeconds(newVMIMigration)
if err != nil {
log.Log.V(4).Infof(migrationTransTimeErrFmt, err)
return
}
labels := []string{string(newVMIMigration.Status.Phase)}
histogram, err := vmiMigrationPhaseTransitionTimeFromCreation.GetMetricWithLabelValues(labels...)
if err != nil {
log.Log.Reason(err).Error(migrationTransTimeFail)
return
}
histogram.Observe(diffSeconds)
}
func getVMIMigrationTransitionTimeSeconds(newVMIMigration *v1.VirtualMachineInstanceMigration) (float64, error) {
var oldTime *metav1.Time
var newTime *metav1.Time
oldTime = newVMIMigration.CreationTimestamp.DeepCopy()
for _, transitionTimestamp := range newVMIMigration.Status.PhaseTransitionTimestamps {
if transitionTimestamp.Phase == newVMIMigration.Status.Phase {
newTime = transitionTimestamp.PhaseTransitionTimestamp.DeepCopy()
} else if newTime != nil {
break
}
}
return getTransitionTimeSeconds(oldTime, newTime)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package virt_controller
import (
"github.com/rhobs/operator-observability-toolkit/pkg/operatormetrics"
k6tv1 "kubevirt.io/api/core/v1"
)
var (
migrationStatsCollector = operatormetrics.Collector{
Metrics: []operatormetrics.Metric{
pendingMigrations,
schedulingMigrations,
unsetMigration,
runningMigrations,
succeededMigration,
failedMigration,
},
CollectCallback: migrationStatsCollectorCallback,
}
pendingMigrations = operatormetrics.NewGauge(
operatormetrics.MetricOpts{
Name: "kubevirt_vmi_migrations_in_pending_phase",
Help: "Number of current pending migrations.",
},
)
schedulingMigrations = operatormetrics.NewGauge(
operatormetrics.MetricOpts{
Name: "kubevirt_vmi_migrations_in_scheduling_phase",
Help: "Number of current scheduling migrations.",
},
)
unsetMigration = operatormetrics.NewGauge(
operatormetrics.MetricOpts{
Name: "kubevirt_vmi_migrations_in_unset_phase",
Help: "Number of current unset migrations. These are pending items the virt-controller hasn’t processed yet from the queue.",
},
)
runningMigrations = operatormetrics.NewGauge(
operatormetrics.MetricOpts{
Name: "kubevirt_vmi_migrations_in_running_phase",
Help: "Number of current running migrations.",
},
)
succeededMigration = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vmi_migration_succeeded",
Help: "Indicates if the VMI migration succeeded.",
},
[]string{"vmi", "vmim", "namespace"},
)
failedMigration = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vmi_migration_failed",
Help: "Indicates if the VMI migration failed.",
},
[]string{"vmi", "vmim", "namespace"},
)
)
func migrationStatsCollectorCallback() []operatormetrics.CollectorResult {
cachedObjs := indexers.VMIMigration.List()
vmims := make([]*k6tv1.VirtualMachineInstanceMigration, len(cachedObjs))
for i, obj := range cachedObjs {
vmims[i] = obj.(*k6tv1.VirtualMachineInstanceMigration)
}
return reportMigrationStats(vmims)
}
func reportMigrationStats(vmims []*k6tv1.VirtualMachineInstanceMigration) []operatormetrics.CollectorResult {
var cr []operatormetrics.CollectorResult
pendingCount := 0
schedulingCount := 0
unsetCount := 0
runningCount := 0
for _, vmim := range vmims {
switch vmim.Status.Phase {
case k6tv1.MigrationPending:
pendingCount++
case k6tv1.MigrationScheduling:
schedulingCount++
case k6tv1.MigrationPhaseUnset:
unsetCount++
case k6tv1.MigrationRunning, k6tv1.MigrationScheduled, k6tv1.MigrationPreparingTarget, k6tv1.MigrationTargetReady:
runningCount++
case k6tv1.MigrationSucceeded:
cr = append(cr, operatormetrics.CollectorResult{Metric: succeededMigration, Value: 1, Labels: []string{vmim.Spec.VMIName, vmim.Name, vmim.Namespace}})
default:
cr = append(cr, operatormetrics.CollectorResult{Metric: failedMigration, Value: 1, Labels: []string{vmim.Spec.VMIName, vmim.Name, vmim.Namespace}})
}
}
return append(cr,
operatormetrics.CollectorResult{Metric: pendingMigrations, Value: float64(pendingCount)},
operatormetrics.CollectorResult{Metric: schedulingMigrations, Value: float64(schedulingCount)},
operatormetrics.CollectorResult{Metric: unsetMigration, Value: float64(unsetCount)},
operatormetrics.CollectorResult{Metric: runningMigrations, Value: float64(runningCount)},
)
}
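// Illustrative note (not part of the original source): the collector above
// maps migration phases onto metrics as follows. Pending, Scheduling, Unset,
// and the in-flight phases (Running, Scheduled, PreparingTarget, TargetReady)
// feed the per-phase count gauges; MigrationSucceeded emits
// kubevirt_vmi_migration_succeeded for that VMIM; every remaining phase (in
// practice MigrationFailed) emits kubevirt_vmi_migration_failed.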
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*/
package virt_controller
import (
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/rhobs/operator-observability-toolkit/pkg/operatormetrics"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
)
const (
transTimeErrFmt = "Error encountered during VMI transition time histogram calculation: %v"
transTimeFail = "Failed to get a histogram for a VMI lifecycle transition times"
)
var (
perfscaleMetrics = []operatormetrics.Metric{
vmiPhaseTransition,
vmiPhaseTransitionTimeFromCreation,
vmiPhaseTransitionFromDeletion,
}
vmiPhaseTransition = operatormetrics.NewHistogramVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vmi_phase_transition_time_seconds",
Help: "Histogram of VM phase transitions duration between different phases in seconds.",
},
prometheus.HistogramOpts{
Buckets: PhaseTransitionTimeBuckets(),
},
[]string{
// phase of the vmi
"phase",
// last phase of the vmi
"last_phase",
},
)
vmiPhaseTransitionTimeFromCreation = operatormetrics.NewHistogramVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vmi_phase_transition_time_from_creation_seconds",
Help: "Histogram of VM phase transitions duration from creation time in seconds.",
},
prometheus.HistogramOpts{
Buckets: PhaseTransitionTimeBuckets(),
},
[]string{
// phase of the vmi
"phase",
},
)
vmiPhaseTransitionFromDeletion = operatormetrics.NewHistogramVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vmi_phase_transition_time_from_deletion_seconds",
Help: "Histogram of VM phase transitions duration from deletion time in seconds.",
},
prometheus.HistogramOpts{
Buckets: PhaseTransitionTimeBuckets(),
},
[]string{
// phase of the vmi
"phase",
},
)
)
func AddVMIPhaseTransitionHandlers(informer cache.SharedIndexInformer) error {
err := addVMIPhaseTransitionHandler(informer)
if err != nil {
return err
}
err = addVMIPhaseTransitionTimeFromCreationHandler(informer)
if err != nil {
return err
}
err = addVMIPhaseTransitionTimeFromDeletionHandler(informer)
if err != nil {
return err
}
return nil
}
func addVMIPhaseTransitionHandler(informer cache.SharedIndexInformer) error {
_, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(oldVMI, newVMI interface{}) {
updateVMIPhaseTransitionTime(oldVMI.(*v1.VirtualMachineInstance), newVMI.(*v1.VirtualMachineInstance))
},
})
return err
}
func addVMIPhaseTransitionTimeFromCreationHandler(informer cache.SharedIndexInformer) error {
_, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(oldVMI, newVMI interface{}) {
updateVMIPhaseTransitionTimeFromCreationTime(oldVMI.(*v1.VirtualMachineInstance), newVMI.(*v1.VirtualMachineInstance))
},
})
return err
}
func addVMIPhaseTransitionTimeFromDeletionHandler(informer cache.SharedIndexInformer) error {
_, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(oldVMI, newVMI interface{}) {
// User is deleting a VM. Record the time from the
// deletionTimestamp to when the VMI enters the final phase
updateVMIPhaseTransitionTimeFromDeletionTime(oldVMI.(*v1.VirtualMachineInstance), newVMI.(*v1.VirtualMachineInstance))
},
})
return err
}
func updateVMIPhaseTransitionTime(oldVMI *v1.VirtualMachineInstance, newVMI *v1.VirtualMachineInstance) {
if oldVMI == nil || oldVMI.Status.Phase == newVMI.Status.Phase {
return
}
diffSeconds, err := getVMITransitionTimeSeconds(false, false, oldVMI, newVMI)
if err != nil {
log.Log.V(4).Infof(transTimeErrFmt, err)
return
}
labels := []string{string(newVMI.Status.Phase), string(oldVMI.Status.Phase)}
histogram, err := vmiPhaseTransition.GetMetricWithLabelValues(labels...)
if err != nil {
log.Log.Reason(err).Error(transTimeFail)
return
}
histogram.Observe(diffSeconds)
}
func updateVMIPhaseTransitionTimeFromCreationTime(oldVMI *v1.VirtualMachineInstance, newVMI *v1.VirtualMachineInstance) {
if oldVMI == nil || oldVMI.Status.Phase == newVMI.Status.Phase {
return
}
diffSeconds, err := getVMITransitionTimeSeconds(true, false, oldVMI, newVMI)
if err != nil {
log.Log.V(4).Infof(transTimeErrFmt, err)
return
}
labels := []string{string(newVMI.Status.Phase)}
histogram, err := vmiPhaseTransitionTimeFromCreation.GetMetricWithLabelValues(labels...)
if err != nil {
log.Log.Reason(err).Error(transTimeFail)
return
}
histogram.Observe(diffSeconds)
}
func updateVMIPhaseTransitionTimeFromDeletionTime(oldVMI *v1.VirtualMachineInstance, newVMI *v1.VirtualMachineInstance) {
if !newVMI.IsMarkedForDeletion() || !newVMI.IsFinal() {
return
}
if oldVMI == nil || oldVMI.Status.Phase == newVMI.Status.Phase {
return
}
diffSeconds, err := getVMITransitionTimeSeconds(false, true, oldVMI, newVMI)
if err != nil {
log.Log.V(4).Infof(transTimeErrFmt, err)
return
}
labels := []string{string(newVMI.Status.Phase)}
histogram, err := vmiPhaseTransitionFromDeletion.GetMetricWithLabelValues(labels...)
if err != nil {
log.Log.Reason(err).Error(transTimeFail)
return
}
histogram.Observe(diffSeconds)
}
func getVMITransitionTimeSeconds(fromCreation bool, fromDeletion bool, oldVMI *v1.VirtualMachineInstance, newVMI *v1.VirtualMachineInstance) (float64, error) {
var oldTime *metav1.Time
var newTime *metav1.Time
if fromCreation || oldVMI == nil || (oldVMI.Status.Phase == v1.VmPhaseUnset) {
oldTime = newVMI.CreationTimestamp.DeepCopy()
} else if fromDeletion && newVMI.IsMarkedForDeletion() {
oldTime = newVMI.DeletionTimestamp.DeepCopy()
} else if fromDeletion && !newVMI.IsMarkedForDeletion() {
return 0.0, fmt.Errorf("missing deletion timestamp")
}
for _, transitionTimestamp := range newVMI.Status.PhaseTransitionTimestamps {
if newTime == nil && transitionTimestamp.Phase == newVMI.Status.Phase {
newTime = transitionTimestamp.PhaseTransitionTimestamp.DeepCopy()
} else if oldTime == nil && oldVMI != nil && transitionTimestamp.Phase == oldVMI.Status.Phase {
oldTime = transitionTimestamp.PhaseTransitionTimestamp.DeepCopy()
} else if oldTime != nil && newTime != nil {
break
}
}
return getTransitionTimeSeconds(oldTime, newTime)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package virt_controller
import (
"strconv"
"strings"
"github.com/rhobs/operator-observability-toolkit/pkg/operatormetrics"
k8sv1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
"kubevirt.io/client-go/log"
k6tv1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/controller"
"kubevirt.io/kubevirt/pkg/network/netbinding"
"kubevirt.io/kubevirt/pkg/util/migrations"
"kubevirt.io/kubevirt/pkg/virt-controller/services"
)
const (
none = "" // Empty values will be ignored by operator-observability and label will not be created
other = "<other>"
annotationPrefix = "vm.kubevirt.io/"
instancetypeVendorLabel = "instancetype.kubevirt.io/vendor"
)
var (
whitelistedInstanceTypeVendors = map[string]bool{
"kubevirt.io": true,
"redhat.com": true,
}
vmiStatsCollector = operatormetrics.Collector{
Metrics: []operatormetrics.Metric{
vmiInfo,
vmiEvictionBlocker,
vmiAddresses,
vmiMigrationStartTime,
vmiMigrationEndTime,
vmiVnicInfo,
vmiLauncherMemoryOverhead,
vmiEphemeralHotplugVolume,
},
CollectCallback: vmiStatsCollectorCallback,
}
vmiInfo = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vmi_info",
Help: "Information about VirtualMachineInstances.",
},
[]string{
// Basic info
"node", "namespace", "name",
// Domain info
"phase", "os", "workload", "flavor",
// Instance type
"instance_type", "preference",
// Guest OS info
"guest_os_kernel_release", "guest_os_machine", "guest_os_arch", "guest_os_name", "guest_os_version_id",
// State info
"evictable", "outdated",
// Pod info
"vmi_pod",
},
)
vmiEvictionBlocker = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vmi_non_evictable",
Help: "Indication for a VirtualMachine that its eviction strategy is set to Live Migration but is not migratable.",
},
[]string{"node", "namespace", "name"},
)
vmiAddresses = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vmi_status_addresses",
Help: "The addresses of a VirtualMachineInstance. This metric provides the address of an available network " +
"interface associated with the VMI in the 'address' label, and about the type of address, such as " +
"internal IP, in the 'type' label.",
},
[]string{"node", "namespace", "name", "vnic_name", "interface_name", "address", "type"},
)
vmiMigrationStartTime = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vmi_migration_start_time_seconds",
Help: "The time at which the migration started.",
},
[]string{"node", "namespace", "name", "migration_name"},
)
vmiMigrationEndTime = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vmi_migration_end_time_seconds",
Help: "The time at which the migration ended.",
},
[]string{"node", "namespace", "name", "migration_name", "status"},
)
vmiVnicInfo = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vmi_vnic_info",
Help: "Details of VirtualMachineInstance (VMI) vNIC interfaces, such as vNIC name, binding type, " +
"network name, and binding name for each vNIC of a running instance.",
},
[]string{"name", "namespace", "vnic_name", "binding_type", "network", "binding_name", "model"},
)
vmiLauncherMemoryOverhead = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vmi_launcher_memory_overhead_bytes",
Help: "Estimation of the memory amount required for virt-launcher's infrastructure components (e.g. libvirt, QEMU).",
},
[]string{"namespace", "name"},
)
vmiEphemeralHotplugVolume = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vmi_contains_ephemeral_hotplug_volume",
Help: "Reported only for VMIs that contain an ephemeral hotplug volume.",
},
[]string{"namespace", "name", "volume_name"},
)
)
func vmiStatsCollectorCallback() []operatormetrics.CollectorResult {
cachedObjs := stores.VMI.List()
if len(cachedObjs) == 0 {
log.Log.V(4).Infof("No VMIs detected")
return []operatormetrics.CollectorResult{}
}
vmis := make([]*k6tv1.VirtualMachineInstance, len(cachedObjs))
for i, obj := range cachedObjs {
vmis[i] = obj.(*k6tv1.VirtualMachineInstance)
}
return reportVmisStats(vmis)
}
func reportVmisStats(vmis []*k6tv1.VirtualMachineInstance) []operatormetrics.CollectorResult {
var crs []operatormetrics.CollectorResult
for _, vmi := range vmis {
crs = append(crs, collectVMIInfo(vmi))
crs = append(crs, getEvictionBlocker(vmi))
crs = append(crs, collectVMIInterfacesInfo(vmi)...)
crs = append(crs, collectVMIMigrationTime(vmi)...)
crs = append(crs, CollectVmisVnicInfo(vmi)...)
crs = append(crs, collectVMILauncherMemoryOverhead(vmi))
crs = append(crs, collectVMIEphemeralHotplug(vmi)...)
}
return crs
}
func collectVMILauncherMemoryOverhead(vmi *k6tv1.VirtualMachineInstance) operatormetrics.CollectorResult {
memoryOverhead := services.CalculateMemoryOverhead(clusterConfig, netbinding.MemoryCalculator{}, vmi)
return operatormetrics.CollectorResult{
Metric: vmiLauncherMemoryOverhead,
Labels: []string{vmi.Namespace, vmi.Name},
Value: float64(memoryOverhead.Value()),
}
}
func collectVMIInfo(vmi *k6tv1.VirtualMachineInstance) operatormetrics.CollectorResult {
os, workload, flavor := getSystemInfoFromAnnotations(vmi.Annotations)
instanceType := getVMIInstancetype(vmi)
preference := getVMIPreference(vmi)
kernelRelease, guestOSMachineArch, name, versionID := getGuestOSInfo(vmi)
guestOSMachineType := getVMIMachine(vmi)
vmiPod := getVMIPod(vmi)
return operatormetrics.CollectorResult{
Metric: vmiInfo,
Labels: []string{
vmi.Status.NodeName, vmi.Namespace, vmi.Name,
getVMIPhase(vmi), os, workload, flavor, instanceType, preference,
kernelRelease, guestOSMachineType, guestOSMachineArch, name, versionID,
strconv.FormatBool(isVMEvictable(vmi)),
strconv.FormatBool(isVMIOutdated(vmi)),
vmiPod,
},
Value: 1.0,
}
}
func getVMIPhase(vmi *k6tv1.VirtualMachineInstance) string {
return strings.ToLower(string(vmi.Status.Phase))
}
func getSystemInfoFromAnnotations(annotations map[string]string) (os, workload, flavor string) {
os = none
workload = none
flavor = none
if val, ok := annotations[annotationPrefix+"os"]; ok {
os = val
}
if val, ok := annotations[annotationPrefix+"workload"]; ok {
workload = val
}
if val, ok := annotations[annotationPrefix+"flavor"]; ok {
flavor = val
}
return
}
func getGuestOSInfo(vmi *k6tv1.VirtualMachineInstance) (kernelRelease, guestOSMachineArch, name, versionID string) {
if vmi.Status.GuestOSInfo == (k6tv1.VirtualMachineInstanceGuestOSInfo{}) {
return
}
if vmi.Status.GuestOSInfo.KernelRelease != "" {
kernelRelease = vmi.Status.GuestOSInfo.KernelRelease
}
if vmi.Status.GuestOSInfo.Machine != "" {
guestOSMachineArch = vmi.Status.GuestOSInfo.Machine
}
if vmi.Status.GuestOSInfo.Name != "" {
name = vmi.Status.GuestOSInfo.Name
}
if vmi.Status.GuestOSInfo.VersionID != "" {
versionID = vmi.Status.GuestOSInfo.VersionID
}
return
}
func getVMIMachine(vmi *k6tv1.VirtualMachineInstance) (guestOSMachineType string) {
if vmi.Status.Machine != nil {
guestOSMachineType = vmi.Status.Machine.Type
}
return
}
func getVMIPod(vmi *k6tv1.VirtualMachineInstance) string {
objs, err := indexers.KVPod.ByIndex(cache.NamespaceIndex, vmi.Namespace)
if err != nil {
return none
}
for _, obj := range objs {
pod, ok := obj.(*k8sv1.Pod)
if !ok {
continue
}
if pod.Labels["kubevirt.io/created-by"] == string(vmi.UID) && pod.Status.Phase == k8sv1.PodRunning {
if vmi.Status.NodeName == pod.Spec.NodeName {
return pod.Name
}
}
}
return none
}
func getVMIInstancetype(vmi *k6tv1.VirtualMachineInstance) string {
if instancetypeName, ok := vmi.Annotations[k6tv1.InstancetypeAnnotation]; ok {
key := types.NamespacedName{
Namespace: vmi.Namespace,
Name: instancetypeName,
}
return fetchResourceName(key.String(), stores.Instancetype)
}
if clusterInstancetypeName, ok := vmi.Annotations[k6tv1.ClusterInstancetypeAnnotation]; ok {
return fetchResourceName(clusterInstancetypeName, stores.ClusterInstancetype)
}
return none
}
func getVMIPreference(vmi *k6tv1.VirtualMachineInstance) string {
if preferenceName, ok := vmi.Annotations[k6tv1.PreferenceAnnotation]; ok {
key := types.NamespacedName{
Namespace: vmi.Namespace,
Name: preferenceName,
}
return fetchResourceName(key.String(), stores.Preference)
}
if clusterPreferenceName, ok := vmi.Annotations[k6tv1.ClusterPreferenceAnnotation]; ok {
return fetchResourceName(clusterPreferenceName, stores.ClusterPreference)
}
return none
}
func fetchResourceName(key string, store cache.Store) string {
obj, ok, err := store.GetByKey(key)
if err != nil || !ok {
return other
}
apiObj, ok := obj.(v1.Object)
if !ok {
return other
}
vendorName := apiObj.GetLabels()[instancetypeVendorLabel]
if _, isWhitelisted := whitelistedInstanceTypeVendors[vendorName]; isWhitelisted {
return apiObj.GetName()
}
return other
}
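// Illustrative note (not part of the original source): the vendor gate above
// means only objects labeled with a whitelisted vendor expose their real name,
// e.g. for a hypothetical cluster instance type:
//
//	// instancetype.kubevirt.io/vendor: kubevirt.io  -> metric label is the object name
//	// instancetype.kubevirt.io/vendor: acme.example -> metric label is "<other>"
//
// keeping user-defined instance type and preference names out of the metrics.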
func getEvictionBlocker(vmi *k6tv1.VirtualMachineInstance) operatormetrics.CollectorResult {
nonEvictable := 1.0
if isVMEvictable(vmi) {
nonEvictable = 0.0
}
return operatormetrics.CollectorResult{
Metric: vmiEvictionBlocker,
Labels: []string{vmi.Status.NodeName, vmi.Namespace, vmi.Name},
Value: nonEvictable,
}
}
func isVMEvictable(vmi *k6tv1.VirtualMachineInstance) bool {
if migrations.VMIMigratableOnEviction(clusterConfig, vmi) {
vmiIsMigratableCond := controller.NewVirtualMachineInstanceConditionManager().
GetCondition(vmi, k6tv1.VirtualMachineInstanceIsMigratable)
// As this metric is used for user alerts we prefer to be conservative - so if the VirtualMachineInstanceIsMigratable
// condition is not yet set we treat the VM as "not migratable"
if vmiIsMigratableCond == nil || vmiIsMigratableCond.Status == k8sv1.ConditionFalse {
return false
}
}
return true
}
func isVMIOutdated(vmi *k6tv1.VirtualMachineInstance) bool {
_, hasOutdatedLabel := vmi.Labels[k6tv1.OutdatedLauncherImageLabel]
return hasOutdatedLabel
}
func collectVMIInterfacesInfo(vmi *k6tv1.VirtualMachineInstance) []operatormetrics.CollectorResult {
var crs []operatormetrics.CollectorResult
for _, iface := range vmi.Status.Interfaces {
if cr := collectVMIInterfaceInfo(vmi, iface); cr != nil {
crs = append(crs, *cr)
}
}
return crs
}
func collectVMIInterfaceInfo(vmi *k6tv1.VirtualMachineInstance, iface k6tv1.VirtualMachineInstanceNetworkInterface) *operatormetrics.CollectorResult {
interfaceType := "ExternalInterface"
if iface.IP == "" {
if iface.Name == "" && iface.InterfaceName == "" {
// Avoid duplicate metric labels error
return nil
}
interfaceType = "SystemInterface"
}
return &operatormetrics.CollectorResult{
Metric: vmiAddresses,
Labels: []string{
vmi.Status.NodeName, vmi.Namespace, vmi.Name,
iface.Name, iface.InterfaceName, iface.IP, interfaceType,
},
Value: 1.0,
}
}
func collectVMIMigrationTime(vmi *k6tv1.VirtualMachineInstance) []operatormetrics.CollectorResult {
var cr []operatormetrics.CollectorResult
var migrationName string
if vmi.Status.MigrationState == nil {
return cr
}
migrationName = getMigrationNameFromMigrationUID(vmi.Status.MigrationState.MigrationUID)
if vmi.Status.MigrationState.StartTimestamp != nil {
cr = append(cr, operatormetrics.CollectorResult{
Metric: vmiMigrationStartTime,
Value: float64(vmi.Status.MigrationState.StartTimestamp.Time.Unix()),
Labels: []string{vmi.Status.NodeName, vmi.Namespace, vmi.Name, migrationName},
})
}
if vmi.Status.MigrationState.EndTimestamp != nil {
cr = append(cr, operatormetrics.CollectorResult{
Metric: vmiMigrationEndTime,
Value: float64(vmi.Status.MigrationState.EndTimestamp.Time.Unix()),
Labels: []string{vmi.Status.NodeName, vmi.Namespace, vmi.Name, migrationName,
calculateMigrationStatus(vmi.Status.MigrationState),
},
})
}
return cr
}
func calculateMigrationStatus(migrationState *k6tv1.VirtualMachineInstanceMigrationState) string {
if !migrationState.Completed {
return ""
}
if migrationState.Failed {
return "failed"
}
return "succeeded"
}
func getMigrationNameFromMigrationUID(migrationUID types.UID) string {
objs, err := indexers.VMIMigration.ByIndex(controller.ByMigrationUIDIndex, string(migrationUID))
if err != nil || len(objs) == 0 {
return none
}
return objs[0].(*k6tv1.VirtualMachineInstanceMigration).Name
}
func CollectVmisVnicInfo(vmi *k6tv1.VirtualMachineInstance) []operatormetrics.CollectorResult {
var results []operatormetrics.CollectorResult
interfaces := vmi.Spec.Domain.Devices.Interfaces
networks := vmi.Spec.Networks
for _, iface := range interfaces {
model := "<none>"
if iface.Model != "" {
model = iface.Model
}
bindingType, bindingName := getBinding(iface)
networkName, matchFound := getNetworkName(iface.Name, networks)
if !matchFound {
continue
}
results = append(results, operatormetrics.CollectorResult{
Metric: vmiVnicInfo,
Labels: []string{
vmi.Name,
vmi.Namespace,
iface.Name,
bindingType,
networkName,
bindingName,
model,
},
Value: 1.0,
})
}
return results
}
func collectVMIEphemeralHotplug(vmi *k6tv1.VirtualMachineInstance) []operatormetrics.CollectorResult {
results := []operatormetrics.CollectorResult{}
annotations := vmi.GetAnnotations()
if volumeName, exists := annotations[k6tv1.EphemeralHotplugAnnotation]; exists {
results = append(results, operatormetrics.CollectorResult{
Metric: vmiEphemeralHotplugVolume,
Labels: []string{vmi.Namespace, vmi.Name, volumeName},
Value: float64(1),
})
}
return results
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package virt_controller
import (
"github.com/rhobs/operator-observability-toolkit/pkg/operatormetrics"
io_prometheus_client "github.com/prometheus/client_model/go"
snapshotv1 "kubevirt.io/api/snapshot/v1beta1"
)
var (
vmSnapshotMetrics = []operatormetrics.Metric{
VmSnapshotSucceededTimestamp,
}
VmSnapshotSucceededTimestamp = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vmsnapshot_succeeded_timestamp_seconds",
Help: "Returns the timestamp of successful virtual machine snapshot.",
},
[]string{"name", "snapshot_name", "namespace"},
)
)
func HandleSucceededVMSnapshot(snapshot *snapshotv1.VirtualMachineSnapshot) {
if snapshot.Status.Phase == snapshotv1.Succeeded {
VmSnapshotSucceededTimestamp.WithLabelValues(
snapshot.Spec.Source.Name,
snapshot.Name,
snapshot.Namespace,
).Set(float64(snapshot.Status.CreationTime.Unix()))
}
}
func GetVmSnapshotSucceededTimestamp(vm, snapshot, namespace string) (float64, error) {
dto := &io_prometheus_client.Metric{}
if err := VmSnapshotSucceededTimestamp.WithLabelValues(vm, snapshot, namespace).Write(dto); err != nil {
return 0, err
}
return *dto.Gauge.Value, nil
}
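// Illustrative sketch (not part of the original source): a snapshot handled
// above can be read back per label set:
//
//	HandleSucceededVMSnapshot(snapshot) // phase Succeeded, creation time T
//	ts, _ := GetVmSnapshotSucceededTimestamp(
//		snapshot.Spec.Source.Name, snapshot.Name, snapshot.Namespace)
//	// ts == float64(T.Unix())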
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package virt_controller
import (
"regexp"
"strings"
"github.com/rhobs/operator-observability-toolkit/pkg/operatormetrics"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
k6tv1 "kubevirt.io/api/core/v1"
instancetypeapi "kubevirt.io/api/instancetype"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/controller"
vmlabels "kubevirt.io/kubevirt/pkg/monitoring/metrics/common/labels"
"kubevirt.io/kubevirt/pkg/util/hardware"
"kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/converter/vcpu"
)
var (
vmStatsCollector = operatormetrics.Collector{
Metrics: append(timestampMetrics, vmResourceRequests, vmResourceLimits, vmInfo, vmDiskAllocatedSize, vmCreationTimestamp, vmVnicInfo, vmLabels),
CollectCallback: vmStatsCollectorCallback,
}
invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`)
// cached labels configuration (initialized on first use)
vmLabelsCfg vmlabels.Config
timestampMetrics = []operatormetrics.Metric{
startingTimestamp,
runningTimestamp,
migratingTimestamp,
nonRunningTimestamp,
errorTimestamp,
}
startingTimestamp = operatormetrics.NewCounterVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vm_starting_status_last_transition_timestamp_seconds",
Help: "Virtual Machine last transition timestamp to starting status.",
},
labels,
)
runningTimestamp = operatormetrics.NewCounterVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vm_running_status_last_transition_timestamp_seconds",
Help: "Virtual Machine last transition timestamp to running status.",
},
labels,
)
migratingTimestamp = operatormetrics.NewCounterVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vm_migrating_status_last_transition_timestamp_seconds",
Help: "Virtual Machine last transition timestamp to migrating status.",
},
labels,
)
nonRunningTimestamp = operatormetrics.NewCounterVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vm_non_running_status_last_transition_timestamp_seconds",
Help: "Virtual Machine last transition timestamp to paused/stopped status.",
},
labels,
)
errorTimestamp = operatormetrics.NewCounterVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vm_error_status_last_transition_timestamp_seconds",
Help: "Virtual Machine last transition timestamp to error status.",
},
labels,
)
labels = []string{"name", "namespace"}
startingStatuses = []k6tv1.VirtualMachinePrintableStatus{
k6tv1.VirtualMachineStatusProvisioning,
k6tv1.VirtualMachineStatusStarting,
k6tv1.VirtualMachineStatusWaitingForVolumeBinding,
}
runningStatuses = []k6tv1.VirtualMachinePrintableStatus{
k6tv1.VirtualMachineStatusRunning,
}
migratingStatuses = []k6tv1.VirtualMachinePrintableStatus{
k6tv1.VirtualMachineStatusMigrating,
}
nonRunningStatuses = []k6tv1.VirtualMachinePrintableStatus{
k6tv1.VirtualMachineStatusStopped,
k6tv1.VirtualMachineStatusPaused,
k6tv1.VirtualMachineStatusStopping,
k6tv1.VirtualMachineStatusTerminating,
}
errorStatuses = []k6tv1.VirtualMachinePrintableStatus{
k6tv1.VirtualMachineStatusCrashLoopBackOff,
k6tv1.VirtualMachineStatusUnknown,
k6tv1.VirtualMachineStatusUnschedulable,
k6tv1.VirtualMachineStatusErrImagePull,
k6tv1.VirtualMachineStatusImagePullBackOff,
k6tv1.VirtualMachineStatusPvcNotFound,
k6tv1.VirtualMachineStatusDataVolumeError,
}
vmResourceRequests = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vm_resource_requests",
Help: "Resources requested by Virtual Machine. Reports memory and CPU requests.",
},
[]string{"name", "namespace", "resource", "unit", "source"},
)
vmResourceLimits = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vm_resource_limits",
Help: "Resources limits by Virtual Machine. Reports memory and CPU limits.",
},
[]string{"name", "namespace", "resource", "unit"},
)
vmInfo = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vm_info",
Help: "Information about Virtual Machines.",
},
[]string{
// Basic info
"name", "namespace",
// VM annotations
"os", "workload", "flavor",
// VM Machine Type
"machine_type",
// Instance type
"instance_type", "preference",
// Status
"status", "status_group",
},
)
vmDiskAllocatedSize = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vm_disk_allocated_size_bytes",
Help: "Allocated disk size of a Virtual Machine in bytes, based on its PersistentVolumeClaim. " +
"Includes persistentvolumeclaim (PVC name), volume_mode (disk presentation mode: Filesystem or Block), " +
"and device (disk name).",
},
[]string{"name", "namespace", "persistentvolumeclaim", "volume_mode", "device"},
)
vmCreationTimestamp = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vm_create_date_timestamp_seconds",
Help: "Virtual Machine creation timestamp.",
},
[]string{"name", "namespace"},
)
vmVnicInfo = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vm_vnic_info",
Help: "Details of Virtual Machine (VM) vNIC interfaces, such as vNIC name, binding type, network name, " +
"and binding name for each vNIC defined in the VM's configuration.",
},
[]string{"name", "namespace", "vnic_name", "binding_type", "network", "binding_name", "model"},
)
vmLabels = operatormetrics.NewGaugeVec(
operatormetrics.MetricOpts{
Name: "kubevirt_vm_labels",
Help: "The metric exposes the VM labels as Prometheus labels. Configure allowed and ignored labels via the 'kubevirt-vm-labels-config' ConfigMap.",
},
labels,
)
)
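// vmStatsCollectorCallback gathers every VM-level metric from the informer
// cache. It is the single entry point for this collector: disk sizes, VM info,
// resource requests/limits, status timestamps, creation timestamps, vNIC info
// and label metrics are all produced from the same cached VM list.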
func vmStatsCollectorCallback() []operatormetrics.CollectorResult {
cachedObjs := stores.VM.List()
if len(cachedObjs) == 0 {
return []operatormetrics.CollectorResult{}
}
vms := make([]*k6tv1.VirtualMachine, len(cachedObjs))
for i, obj := range cachedObjs {
vms[i] = obj.(*k6tv1.VirtualMachine)
}
var results []operatormetrics.CollectorResult
results = append(results, CollectDiskAllocatedSize(vms)...)
results = append(results, CollectVMsInfo(vms)...)
results = append(results, CollectResourceRequestsAndLimits(vms)...)
results = append(results, reportVmsStats(vms)...)
results = append(results, collectVMCreationTimestamp(vms)...)
results = append(results, CollectVmsVnicInfo(vms)...)
return results
}
func CollectVMsInfo(vms []*k6tv1.VirtualMachine) []operatormetrics.CollectorResult {
var results []operatormetrics.CollectorResult
for _, vm := range vms {
os, workload, flavor, machineType := none, none, none, none
if vm.Spec.Template != nil {
os, workload, flavor = getSystemInfoFromAnnotations(vm.Spec.Template.ObjectMeta.Annotations)
if vm.Spec.Template.Spec.Domain.Machine != nil {
machineType = vm.Spec.Template.Spec.Domain.Machine.Type
}
}
instanceType := getVMInstancetype(vm)
preference := getVMPreference(vm)
results = append(results, operatormetrics.CollectorResult{
Metric: vmInfo,
Labels: []string{
vm.Name, vm.Namespace,
os, workload, flavor, machineType,
instanceType, preference,
strings.ToLower(string(vm.Status.PrintableStatus)), getVMStatusGroup(vm.Status.PrintableStatus),
},
Value: 1.0,
})
}
return results
}
func getVMInstancetype(vm *k6tv1.VirtualMachine) string {
instancetype := vm.Spec.Instancetype
if instancetype == nil {
return none
}
if strings.EqualFold(instancetype.Kind, instancetypeapi.SingularResourceName) {
key := types.NamespacedName{
Namespace: vm.Namespace,
Name: instancetype.Name,
}
return fetchResourceName(key.String(), stores.Instancetype)
}
if strings.EqualFold(instancetype.Kind, instancetypeapi.ClusterSingularResourceName) {
return fetchResourceName(instancetype.Name, stores.ClusterInstancetype)
}
return none
}
func getVMPreference(vm *k6tv1.VirtualMachine) string {
preference := vm.Spec.Preference
if preference == nil {
return none
}
if strings.EqualFold(preference.Kind, instancetypeapi.SingularPreferenceResourceName) {
key := types.NamespacedName{
Namespace: vm.Namespace,
Name: preference.Name,
}
return fetchResourceName(key.String(), stores.Preference)
}
if strings.EqualFold(preference.Kind, instancetypeapi.ClusterSingularPreferenceResourceName) {
return fetchResourceName(preference.Name, stores.ClusterPreference)
}
return none
}
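// getVMStatusGroup buckets a printable status into one of the coarse groups
// used by the timestamp metrics. For example, VirtualMachineStatusCrashLoopBackOff
// maps to "error", VirtualMachineStatusPaused to "non_running", and any status
// not listed in the groups above to "<unknown>".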
func getVMStatusGroup(status k6tv1.VirtualMachinePrintableStatus) string {
switch {
case containsStatus(status, startingStatuses):
return "starting"
case containsStatus(status, runningStatuses):
return "running"
case containsStatus(status, migratingStatuses):
return "migrating"
case containsStatus(status, nonRunningStatuses):
return "non_running"
case containsStatus(status, errorStatuses):
return "error"
}
return "<unknown>"
}
func CollectResourceRequestsAndLimits(vms []*k6tv1.VirtualMachine) []operatormetrics.CollectorResult {
var results []operatormetrics.CollectorResult
for _, vm := range vms {
// Apply any instance type and preference to a copy of the VM before proceeding
vmCopy := vm.DeepCopy()
_ = vmApplier.ApplyToVM(vmCopy)
// Memory requests and limits from domain resources
results = append(results, collectMemoryResourceRequestsFromDomainResources(vmCopy)...)
results = append(results, collectMemoryResourceLimitsFromDomainResources(vmCopy)...)
// CPU requests from domain CPU
results = append(results, collectCpuResourceRequestsFromDomainCpu(vmCopy)...)
// CPU requests and limits from domain resources
results = append(results, collectCpuResourceRequestsFromDomainResources(vmCopy)...)
results = append(results, collectCpuResourceLimitsFromDomainResources(vmCopy)...)
// Allocated CPU and memory requests after applying hierarchy and defaults
results = append(results, collectAllocatedCpuValues(vmCopy)...)
results = append(results, collectAllocatedMemoryValues(vmCopy)...)
}
return results
}
func reportVmsStats(vms []*k6tv1.VirtualMachine) []operatormetrics.CollectorResult {
var cr []operatormetrics.CollectorResult
for _, vm := range vms {
cr = append(cr, reportVmStats(vm)...)
}
return cr
}
func reportVmStats(vm *k6tv1.VirtualMachine) []operatormetrics.CollectorResult {
var cr []operatormetrics.CollectorResult
// VM labels metric collection
cr = append(cr, reportVmLabels(vm)...)
// VM timestamp metrics collection
status := vm.Status.PrintableStatus
currentStateMetric := getMetricDesc(status)
lastTransitionTime := getLastConditionDetails(vm)
for _, metric := range timestampMetrics {
value := float64(0)
if metric == currentStateMetric {
value = float64(lastTransitionTime)
}
cr = append(cr, operatormetrics.CollectorResult{
Metric: metric,
Labels: []string{vm.Name, vm.Namespace},
Value: value,
})
}
return cr
}
func getMetricDesc(status k6tv1.VirtualMachinePrintableStatus) *operatormetrics.CounterVec {
switch {
case containsStatus(status, startingStatuses):
return startingTimestamp
case containsStatus(status, runningStatuses):
return runningTimestamp
case containsStatus(status, migratingStatuses):
return migratingTimestamp
case containsStatus(status, nonRunningStatuses):
return nonRunningTimestamp
case containsStatus(status, errorStatuses):
return errorTimestamp
}
// Fall back to the error timestamp for statuses not covered above.
return errorTimestamp
}
func containsStatus(target k6tv1.VirtualMachinePrintableStatus, elems []k6tv1.VirtualMachinePrintableStatus) bool {
for _, elem := range elems {
if elem == target {
return true
}
}
return false
}
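// getLastConditionDetails returns the Unix timestamp of the most recent
// transition among the Ready, Failure and Paused conditions, or -1 when none
// of these conditions is present on the VM.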
func getLastConditionDetails(vm *k6tv1.VirtualMachine) int64 {
conditions := []k6tv1.VirtualMachineConditionType{
k6tv1.VirtualMachineReady,
k6tv1.VirtualMachineFailure,
k6tv1.VirtualMachinePaused,
}
latestTransitionTime := int64(-1)
for _, c := range vm.Status.Conditions {
if containsCondition(c.Type, conditions) && c.LastTransitionTime.Unix() > latestTransitionTime {
latestTransitionTime = c.LastTransitionTime.Unix()
}
}
return latestTransitionTime
}
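// collectMemoryResourceRequestsFromDomainResources reports explicit memory
// requests in bytes. Example (illustrative): a template with
// resources.requests.memory of 2Gi emits
// kubevirt_vm_resource_requests{resource="memory",unit="bytes",source="domain"} = 2147483648;
// a guest memory of 1Gi adds another series with source="guest".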
func collectMemoryResourceRequestsFromDomainResources(vm *k6tv1.VirtualMachine) []operatormetrics.CollectorResult {
var cr []operatormetrics.CollectorResult
if vm.Spec.Template == nil {
return cr
}
memoryRequested := vm.Spec.Template.Spec.Domain.Resources.Requests.Memory()
if !memoryRequested.IsZero() {
cr = append(cr, operatormetrics.CollectorResult{
Metric: vmResourceRequests,
Value: float64(memoryRequested.Value()),
Labels: []string{vm.Name, vm.Namespace, "memory", "bytes", "domain"},
})
}
if vm.Spec.Template.Spec.Domain.Memory == nil {
return cr
}
guestMemory := vm.Spec.Template.Spec.Domain.Memory.Guest
if guestMemory != nil && !guestMemory.IsZero() {
cr = append(cr, operatormetrics.CollectorResult{
Metric: vmResourceRequests,
Value: float64(guestMemory.Value()),
Labels: []string{vm.Name, vm.Namespace, "memory", "bytes", "guest"},
})
}
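// Note: for hugepages this reports the configured page size in bytes, not the
// total amount of hugepages-backed memory.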
hugepagesMemory := vm.Spec.Template.Spec.Domain.Memory.Hugepages
if hugepagesMemory != nil {
quantity, err := resource.ParseQuantity(hugepagesMemory.PageSize)
if err == nil {
cr = append(cr, operatormetrics.CollectorResult{
Metric: vmResourceRequests,
Value: float64(quantity.Value()),
Labels: []string{vm.Name, vm.Namespace, "memory", "bytes", "hugepages"},
})
}
}
return cr
}
func collectMemoryResourceLimitsFromDomainResources(vm *k6tv1.VirtualMachine) []operatormetrics.CollectorResult {
if vm.Spec.Template == nil {
return []operatormetrics.CollectorResult{}
}
memoryLimit := vm.Spec.Template.Spec.Domain.Resources.Limits.Memory()
if memoryLimit.IsZero() {
return []operatormetrics.CollectorResult{}
}
return []operatormetrics.CollectorResult{{
Metric: vmResourceLimits,
Value: float64(memoryLimit.Value()),
Labels: []string{vm.Name, vm.Namespace, "memory", "bytes"},
}}
}
// collectAllocatedMemoryValues calculates allocated (effective) memory
func collectAllocatedMemoryValues(vm *k6tv1.VirtualMachine) []operatormetrics.CollectorResult {
var cr []operatormetrics.CollectorResult
if vm.Spec.Template == nil {
return cr
}
vmi := &k6tv1.VirtualMachineInstance{Spec: vm.Spec.Template.Spec}
allocatedMemory := vcpu.GetVirtualMemory(vmi)
if allocatedMemory != nil && !allocatedMemory.IsZero() {
cr = append(cr, operatormetrics.CollectorResult{
Metric: vmResourceRequests,
Value: float64(allocatedMemory.Value()),
Labels: []string{vm.Name, vm.Namespace, "memory", "bytes", "guest_effective"},
})
}
return cr
}
func collectCpuResourceRequestsFromDomainCpu(vm *k6tv1.VirtualMachine) []operatormetrics.CollectorResult {
var cr []operatormetrics.CollectorResult
if vm.Spec.Template == nil || vm.Spec.Template.Spec.Domain.CPU == nil {
return cr
}
if vm.Spec.Template.Spec.Domain.CPU.Cores != 0 {
cr = append(cr, operatormetrics.CollectorResult{
Metric: vmResourceRequests,
Value: float64(vm.Spec.Template.Spec.Domain.CPU.Cores),
Labels: []string{vm.Name, vm.Namespace, "cpu", "cores", "domain"},
})
}
if vm.Spec.Template.Spec.Domain.CPU.Threads != 0 {
cr = append(cr, operatormetrics.CollectorResult{
Metric: vmResourceRequests,
Value: float64(vm.Spec.Template.Spec.Domain.CPU.Threads),
Labels: []string{vm.Name, vm.Namespace, "cpu", "threads", "domain"},
})
}
if vm.Spec.Template.Spec.Domain.CPU.Sockets != 0 {
cr = append(cr, operatormetrics.CollectorResult{
Metric: vmResourceRequests,
Value: float64(vm.Spec.Template.Spec.Domain.CPU.Sockets),
Labels: []string{vm.Name, vm.Namespace, "cpu", "sockets", "domain"},
})
}
return cr
}
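// collectCpuResourceRequestsFromDomainResources reports explicit CPU requests
// in cores. Example (illustrative): a template requesting cpu: "2500m" emits
// kubevirt_vm_resource_requests{resource="cpu",unit="cores",source="requests"} = 2.5,
// since ScaledValue(resource.Milli)/1000 converts millicores to cores.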
func collectCpuResourceRequestsFromDomainResources(vm *k6tv1.VirtualMachine) []operatormetrics.CollectorResult {
var cr []operatormetrics.CollectorResult
if vm.Spec.Template == nil {
return cr
}
cpuRequests := vm.Spec.Template.Spec.Domain.Resources.Requests.Cpu()
if cpuRequests == nil || cpuRequests.IsZero() {
// If no CPU requests and no Domain CPU are set, default to 1 thread with 1 core and 1 socket
if vm.Spec.Template.Spec.Domain.CPU == nil {
return append(cr,
operatormetrics.CollectorResult{Metric: vmResourceRequests, Value: 1.0, Labels: []string{vm.Name, vm.Namespace, "cpu", "cores", "default"}},
operatormetrics.CollectorResult{Metric: vmResourceRequests, Value: 1.0, Labels: []string{vm.Name, vm.Namespace, "cpu", "threads", "default"}},
operatormetrics.CollectorResult{Metric: vmResourceRequests, Value: 1.0, Labels: []string{vm.Name, vm.Namespace, "cpu", "sockets", "default"}},
)
}
return cr
}
cr = append(cr, operatormetrics.CollectorResult{
Metric: vmResourceRequests,
Value: float64(cpuRequests.ScaledValue(resource.Milli)) / 1000,
Labels: []string{vm.Name, vm.Namespace, "cpu", "cores", "requests"},
})
return cr
}
func collectCpuResourceLimitsFromDomainResources(vm *k6tv1.VirtualMachine) []operatormetrics.CollectorResult {
var cr []operatormetrics.CollectorResult
if vm.Spec.Template == nil {
return cr
}
cpuLimits := vm.Spec.Template.Spec.Domain.Resources.Limits.Cpu()
if cpuLimits == nil || cpuLimits.IsZero() {
return cr
}
cr = append(cr, operatormetrics.CollectorResult{
Metric: vmResourceLimits,
Value: float64(cpuLimits.ScaledValue(resource.Milli)) / 1000,
Labels: []string{vm.Name, vm.Namespace, "cpu", "cores"},
})
return cr
}
// collectAllocatedCpuValues calculates the allocated (effective) vCPU count
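// Example (illustrative), assuming hardware.GetNumberOfVCPUs multiplies
// sockets, cores and threads with unset fields counting as 1: a topology of
// 2 sockets x 2 cores x 2 threads reports 8 effective vCPUs, while a VM with
// no CPU topology falls back to 1.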
func collectAllocatedCpuValues(vm *k6tv1.VirtualMachine) []operatormetrics.CollectorResult {
var cr []operatormetrics.CollectorResult
if vm.Spec.Template == nil {
return cr
}
if vm.Spec.Template.Spec.Domain.CPU != nil {
allocatedVCPUs := hardware.GetNumberOfVCPUs(vm.Spec.Template.Spec.Domain.CPU)
cr = append(cr, operatormetrics.CollectorResult{
Metric: vmResourceRequests,
Value: float64(allocatedVCPUs),
Labels: []string{vm.Name, vm.Namespace, "cpu", "cores", "guest_effective"},
})
} else {
cr = append(cr, operatormetrics.CollectorResult{
Metric: vmResourceRequests,
Value: 1.0,
Labels: []string{vm.Name, vm.Namespace, "cpu", "cores", "guest_effective"},
})
}
return cr
}
func containsCondition(target k6tv1.VirtualMachineConditionType, elems []k6tv1.VirtualMachineConditionType) bool {
for _, elem := range elems {
if elem == target {
return true
}
}
return false
}
func CollectDiskAllocatedSize(vms []*k6tv1.VirtualMachine) []operatormetrics.CollectorResult {
var cr []operatormetrics.CollectorResult
for _, vm := range vms {
if vm.Spec.Template != nil {
cr = append(cr, collectDiskMetricsFromPVC(vm)...)
}
}
return cr
}
func collectDiskMetricsFromPVC(vm *k6tv1.VirtualMachine) []operatormetrics.CollectorResult {
var cr []operatormetrics.CollectorResult
for _, vol := range vm.Spec.Template.Spec.Volumes {
pvcName, diskName, isDataVolume := getPVCAndDiskName(vol)
if pvcName == "" {
continue
}
key := controller.NamespacedKey(vm.Namespace, pvcName)
obj, exists, err := stores.PersistentVolumeClaim.GetByKey(key)
if err != nil {
log.Log.Errorf("Error retrieving PVC %s in namespace %s: %v", pvcName, vm.Namespace, err)
continue
}
if !exists {
log.Log.Warningf("PVC %s in namespace %s does not exist", pvcName, vm.Namespace)
continue
}
pvc, ok := obj.(*k8sv1.PersistentVolumeClaim)
if !ok {
log.Log.Warningf("Object for PVC %s in namespace %s is not of expected type", pvcName, vm.Namespace)
continue
}
cr = append(cr, getDiskSizeValues(vm, pvc, diskName, isDataVolume))
}
return cr
}
func getPVCAndDiskName(vol k6tv1.Volume) (pvcName, diskName string, isDataVolume bool) {
if vol.PersistentVolumeClaim != nil {
return vol.PersistentVolumeClaim.ClaimName, vol.Name, false
}
if vol.DataVolume != nil {
return vol.DataVolume.Name, vol.Name, true
}
return "", "", false
}
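// getDiskSizeValues resolves the reported disk size: for DataVolume-backed
// volumes it prefers the size requested in the VM's dataVolumeTemplates and
// falls back to the PVC's requested storage otherwise.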
func getDiskSizeValues(vm *k6tv1.VirtualMachine, pvc *k8sv1.PersistentVolumeClaim, diskName string, isDataVolume bool) operatormetrics.CollectorResult {
var pvcSize *resource.Quantity
if isDataVolume {
pvcSize = getSizeFromDataVolumeTemplates(vm, pvc.Name)
}
if pvcSize == nil {
pvcSize = pvc.Spec.Resources.Requests.Storage()
}
volumeMode := ""
if pvc.Spec.VolumeMode != nil {
volumeMode = string(*pvc.Spec.VolumeMode)
}
return operatormetrics.CollectorResult{
Metric: vmDiskAllocatedSize,
Value: float64(pvcSize.Value()),
Labels: []string{vm.Name, vm.Namespace, pvc.Name, volumeMode, diskName},
}
}
func getSizeFromDataVolumeTemplates(vm *k6tv1.VirtualMachine, dataVolumeName string) *resource.Quantity {
for _, dvTemplate := range vm.Spec.DataVolumeTemplates {
if dvTemplate.Name == dataVolumeName {
if dvTemplate.Spec.PVC != nil {
return dvTemplate.Spec.PVC.Resources.Requests.Storage()
} else if dvTemplate.Spec.Storage != nil {
return dvTemplate.Spec.Storage.Resources.Requests.Storage()
}
}
}
return nil
}
func collectVMCreationTimestamp(vms []*k6tv1.VirtualMachine) []operatormetrics.CollectorResult {
var cr []operatormetrics.CollectorResult
for _, vm := range vms {
if !vm.CreationTimestamp.IsZero() {
cr = append(cr, operatormetrics.CollectorResult{
Metric: vmCreationTimestamp,
Labels: []string{vm.Name, vm.Namespace},
Value: float64(vm.CreationTimestamp.Unix()),
})
}
}
return cr
}
func CollectVmsVnicInfo(vms []*k6tv1.VirtualMachine) []operatormetrics.CollectorResult {
var results []operatormetrics.CollectorResult
for _, vm := range vms {
if vm.Spec.Template == nil || vm.Spec.Template.Spec.Domain.Devices.Interfaces == nil {
continue
}
interfaces := vm.Spec.Template.Spec.Domain.Devices.Interfaces
networks := vm.Spec.Template.Spec.Networks
for _, iface := range interfaces {
model := "<none>"
if iface.Model != "" {
model = iface.Model
}
bindingType, bindingName := getBinding(iface)
networkName, matchFound := getNetworkName(iface.Name, networks)
if !matchFound {
continue
}
results = append(results, operatormetrics.CollectorResult{
Metric: vmVnicInfo,
Labels: []string{
vm.Name,
vm.Namespace,
iface.Name,
bindingType,
networkName,
bindingName,
model,
},
Value: 1.0,
})
}
}
return results
}
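// getBinding classifies an interface's binding. For example, an interface
// with Masquerade set yields ("core", "masquerade"), one using a binding
// plugin yields ("plugin", <plugin name>), and both values stay empty when no
// binding method is set.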
func getBinding(iface k6tv1.Interface) (bindingType, bindingName string) {
switch {
case iface.Masquerade != nil:
bindingType = "core"
bindingName = "masquerade"
case iface.Bridge != nil:
bindingType = "core"
bindingName = "bridge"
case iface.SRIOV != nil:
bindingType = "core"
bindingName = "sriov"
case iface.Binding != nil:
bindingType = "plugin"
bindingName = iface.Binding.Name
}
return bindingType, bindingName
}
func getNetworkName(ifaceName string, networks []k6tv1.Network) (string, bool) {
if net := LookupNetworkByName(networks, ifaceName); net != nil {
if net.Pod != nil {
return "pod networking", true
} else if net.Multus != nil {
return net.Multus.NetworkName, true
}
}
return "", false
}
func LookupNetworkByName(networks []k6tv1.Network, name string) *k6tv1.Network {
for _, net := range networks {
if net.Name == name {
return &net
}
}
return nil
}
func reportVmLabels(vm *k6tv1.VirtualMachine) []operatormetrics.CollectorResult {
var cr []operatormetrics.CollectorResult
if vmLabelsCfg == nil {
var err error
vmLabelsCfg, err = vmlabels.New(kubevirtClient)
if err != nil {
log.Log.Warningf("vm-labels: watcher init error: %v", err)
return cr
}
}
// Merge labels from VM metadata and VM template metadata; template values win on duplicate keys
mergedLabels := make(map[string]string)
for key, value := range vm.Labels {
mergedLabels[key] = value
}
if vm.Spec.Template != nil {
for key, value := range vm.Spec.Template.ObjectMeta.Labels {
mergedLabels[key] = value
}
}
if len(mergedLabels) == 0 {
return cr
}
constLabels := make(map[string]string)
for key, value := range mergedLabels {
if vmLabelsCfg.ShouldReport(key) {
sanitizedLabelName := sanitizeLabelName(key)
prometheusLabelName := "label_" + sanitizedLabelName
constLabels[prometheusLabelName] = value
}
}
if len(constLabels) == 0 {
log.Log.Infof("kubevirt_vm_labels skipping vm %s/%s, no allowlist keys found", vm.Namespace, vm.Name)
return cr
}
cr = append(cr, operatormetrics.CollectorResult{
Metric: vmLabels,
Labels: []string{vm.Name, vm.Namespace},
ConstLabels: constLabels,
Value: 1.0,
})
return cr
}
// sanitizeLabelName transforms a VM label key into a Prometheus-safe metric label name.
// It replaces any non [A-Za-z0-9_] characters with '_' and ensures the first
// character is a letter or underscore, as required by Prometheus label naming.
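// Example (illustrative), assuming invalidLabelCharRE (defined elsewhere in
// this package) matches any character outside [A-Za-z0-9_]:
//
//	sanitizeLabelName("app.kubernetes.io/name") // "app_kubernetes_io_name"
//	sanitizeLabelName("0config")                // "_0config"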
func sanitizeLabelName(name string) string {
sanitized := invalidLabelCharRE.ReplaceAllString(name, "_")
if len(sanitized) == 0 || !((sanitized[0] >= 'a' && sanitized[0] <= 'z') || (sanitized[0] >= 'A' && sanitized[0] <= 'Z') || sanitized[0] == '_') {
sanitized = "_" + sanitized
}
return sanitized
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*/
package alerts
import (
"errors"
"fmt"
"os"
"strings"
promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/rhobs/operator-observability-toolkit/pkg/operatorrules"
)
const (
prometheusRunbookAnnotationKey = "runbook_url"
defaultRunbookURLTemplate = "https://kubevirt.io/monitoring/runbooks/%s"
runbookURLTemplateEnv = "RUNBOOK_URL_TEMPLATE"
severityAlertLabelKey = "severity"
operatorHealthImpactLabelKey = "operator_health_impact"
partOfAlertLabelKey = "kubernetes_operator_part_of"
componentAlertLabelKey = "kubernetes_operator_component"
kubevirtLabelValue = "kubevirt"
durationFiveMinutes = "5 minutes"
)
func Register(namespace string) error {
alerts := [][]promv1.Rule{
systemAlerts(namespace),
virtApiAlerts(namespace),
virtControllerAlerts(namespace),
virtHandlerAlerts(namespace),
virtOperatorAlerts(namespace),
vmsAlerts,
}
runbookURLTemplate := getRunbookURLTemplate()
for _, alertGroup := range alerts {
for _, alert := range alertGroup {
alert.Labels[partOfAlertLabelKey] = kubevirtLabelValue
alert.Labels[componentAlertLabelKey] = kubevirtLabelValue
alert.Annotations[prometheusRunbookAnnotationKey] = fmt.Sprintf(runbookURLTemplate, alert.Alert)
}
}
return operatorrules.RegisterAlerts(alerts...)
}
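// getRunbookURLTemplate resolves the runbook URL template. For example, with
// the default template the alert name "VirtAPIDown" expands to
// https://kubevirt.io/monitoring/runbooks/VirtAPIDown; a custom template
// supplied via RUNBOOK_URL_TEMPLATE must contain exactly one %s placeholder.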
func getRunbookURLTemplate() string {
runbookURLTemplate, exists := os.LookupEnv(runbookURLTemplateEnv)
if !exists {
runbookURLTemplate = defaultRunbookURLTemplate
}
if strings.Count(runbookURLTemplate, "%s") != 1 {
panic(errors.New("runbook URL template must have exactly 1 %s substring"))
}
return runbookURLTemplate
}
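// getErrorRatio builds a PromQL expression for the fraction of failing REST
// calls. Example (illustrative): getErrorRatio("kubevirt", "virt-api", "(4|5)[0-9][0-9]", 5) yields
//
//	sum ( rate ( kubevirt_rest_client_requests_total{namespace="kubevirt",pod=~"virt-api-.*",code=~"(4|5)[0-9][0-9]"} [5m] ) )
//	  / sum ( rate ( kubevirt_rest_client_requests_total{namespace="kubevirt",pod=~"virt-api-.*"} [5m] ) )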
func getErrorRatio(ns string, podName string, errorCodeRegex string, durationInMinutes int) string {
errorRatioQuery := "sum ( rate ( kubevirt_rest_client_requests_total{namespace=\"%s\",pod=~\"%s-.*\",code=~\"%s\"} [%dm] ) ) / sum ( rate ( kubevirt_rest_client_requests_total{namespace=\"%s\",pod=~\"%s-.*\"} [%dm] ) )"
return fmt.Sprintf(errorRatioQuery, ns, podName, errorCodeRegex, durationInMinutes, ns, podName, durationInMinutes)
}
func getRestCallsFailedWarning(failingCallsPercentage int, component, duration string) string {
const restCallsFailWarningTemplate = "More than %d%% of the rest calls failed in %s for the last %s"
return fmt.Sprintf(restCallsFailWarningTemplate, failingCallsPercentage, component, duration)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package alerts
import (
promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
)
func systemAlerts(namespace string) []promv1.Rule {
return []promv1.Rule{
{
Alert: "LowKVMNodesCount",
Expr: intstr.FromString("(kubevirt_allocatable_nodes > 1) and (kubevirt_nodes_with_kvm < 2)"),
For: ptr.To(promv1.Duration("5m")),
Annotations: map[string]string{
"description": "Low number of nodes with KVM resource available.",
"summary": "At least two nodes with kvm resource required for VM live migration.",
},
Labels: map[string]string{
severityAlertLabelKey: "warning",
operatorHealthImpactLabelKey: "warning",
},
},
{
Alert: "KubeVirtNoAvailableNodesToRunVMs",
Expr: intstr.FromString("((sum(kube_node_status_allocatable{resource='devices_kubevirt_io_kvm'}) or on() vector(0)) == 0 and (sum(kubevirt_configuration_emulation_enabled) or on() vector(0)) == 0) or (sum(kube_node_labels{label_kubevirt_io_schedulable='true'}) or on() vector(0)) == 0"),
For: ptr.To(promv1.Duration("5m")),
Annotations: map[string]string{
"summary": "There are no available nodes in the cluster to run VMs.",
},
Labels: map[string]string{
severityAlertLabelKey: "warning",
operatorHealthImpactLabelKey: "critical",
},
},
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*/
package alerts
import (
promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
)
func virtApiAlerts(namespace string) []promv1.Rule {
return []promv1.Rule{
{
Alert: "VirtAPIDown",
Expr: intstr.FromString("kubevirt_virt_api_up == 0"),
For: ptr.To(promv1.Duration("10m")),
Annotations: map[string]string{
"summary": "All virt-api servers are down.",
},
Labels: map[string]string{
severityAlertLabelKey: "critical",
operatorHealthImpactLabelKey: "critical",
},
},
{
Alert: "LowVirtAPICount",
Expr: intstr.FromString("(kubevirt_allocatable_nodes > 1) and (kubevirt_virt_api_up < 2)"),
For: ptr.To(promv1.Duration("60m")),
Annotations: map[string]string{
"summary": "More than one virt-api should be running if more than one worker nodes exist.",
},
Labels: map[string]string{
severityAlertLabelKey: "warning",
operatorHealthImpactLabelKey: "warning",
},
},
{
Alert: "VirtApiRESTErrorsBurst",
Expr: intstr.FromString(getErrorRatio(namespace, "virt-api", "(4|5)[0-9][0-9]", 5) + " >= 0.8"),
For: ptr.To(promv1.Duration("5m")),
Annotations: map[string]string{
"summary": getRestCallsFailedWarning(80, "virt-api", durationFiveMinutes),
},
Labels: map[string]string{
severityAlertLabelKey: "critical",
operatorHealthImpactLabelKey: "critical",
},
},
{
Alert: "KubeVirtDeprecatedAPIRequested",
Expr: intstr.FromString("sum by (resource,group,version) ((round(increase(kubevirt_api_request_deprecated_total{verb!~\"LIST|WATCH\"}[10m])) > 0 and kubevirt_api_request_deprecated_total{verb!~\"LIST|WATCH\"} offset 10m) or (kubevirt_api_request_deprecated_total{verb!~\"LIST|WATCH\"} != 0 unless kubevirt_api_request_deprecated_total{verb!~\"LIST|WATCH\"} offset 10m))"),
Annotations: map[string]string{
"description": "Detected requests to the deprecated {{ $labels.resource }}.{{ $labels.group }}/{{ $labels.version }} API.",
"summary": "Detected {{ $value }} requests in the last 10 minutes.",
},
Labels: map[string]string{
severityAlertLabelKey: "info",
operatorHealthImpactLabelKey: "none",
},
},
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*/
package alerts
import (
promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
)
func virtControllerAlerts(namespace string) []promv1.Rule {
return []promv1.Rule{
{
Alert: "LowReadyVirtControllersCount",
Expr: intstr.FromString("kubevirt_virt_controller_ready < cluster:kubevirt_virt_controller_pods_running:count"),
For: ptr.To(promv1.Duration("10m")),
Annotations: map[string]string{
"summary": "Some virt controllers are running but not ready.",
},
Labels: map[string]string{
severityAlertLabelKey: "warning",
operatorHealthImpactLabelKey: "warning",
},
},
{
Alert: "NoReadyVirtController",
Expr: intstr.FromString("kubevirt_virt_controller_ready == 0"),
For: ptr.To(promv1.Duration("10m")),
Annotations: map[string]string{
"summary": "No ready virt-controller was detected for the last 10 min.",
},
Labels: map[string]string{
severityAlertLabelKey: "critical",
operatorHealthImpactLabelKey: "critical",
},
},
{
Alert: "VirtControllerDown",
Expr: intstr.FromString("cluster:kubevirt_virt_controller_pods_running:count == 0"),
For: ptr.To(promv1.Duration("10m")),
Annotations: map[string]string{
"summary": "No running virt-controller was detected for the last 10 min.",
},
Labels: map[string]string{
severityAlertLabelKey: "critical",
operatorHealthImpactLabelKey: "critical",
},
},
{
Alert: "LowVirtControllersCount",
Expr: intstr.FromString("(kubevirt_allocatable_nodes > 1) and (kubevirt_virt_controller_ready < 2)"),
For: ptr.To(promv1.Duration("10m")),
Annotations: map[string]string{
"summary": "More than one virt-controller should be ready if more than one worker node.",
},
Labels: map[string]string{
severityAlertLabelKey: "warning",
operatorHealthImpactLabelKey: "warning",
},
},
{
Alert: "VirtControllerRESTErrorsBurst",
Expr: intstr.FromString(getErrorRatio(namespace, "virt-controller", "(4|5)[0-9][0-9]", 5) + " >= 0.8"),
For: ptr.To(promv1.Duration("5m")),
Annotations: map[string]string{
"summary": getRestCallsFailedWarning(80, "virt-controller", durationFiveMinutes),
},
Labels: map[string]string{
severityAlertLabelKey: "critical",
operatorHealthImpactLabelKey: "critical",
},
},
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*/
package alerts
import (
"fmt"
promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
)
func virtHandlerAlerts(namespace string) []promv1.Rule {
return []promv1.Rule{
{
Alert: "VirtHandlerDaemonSetRolloutFailing",
Expr: intstr.FromString(
fmt.Sprintf("(%s - %s) != 0",
fmt.Sprintf("kube_daemonset_status_number_ready{namespace='%s', daemonset='virt-handler'}", namespace),
fmt.Sprintf("kube_daemonset_status_desired_number_scheduled{namespace='%s', daemonset='virt-handler'}", namespace))),
For: ptr.To(promv1.Duration("15m")),
Annotations: map[string]string{
"summary": "Some virt-handlers failed to roll out",
},
Labels: map[string]string{
severityAlertLabelKey: "warning",
operatorHealthImpactLabelKey: "warning",
},
},
{
Alert: "VirtHandlerRESTErrorsBurst",
Expr: intstr.FromString(getErrorRatio(namespace, "virt-handler", "(4|5)[0-9][0-9]", 5) + " >= 0.8"),
For: ptr.To(promv1.Duration("5m")),
Annotations: map[string]string{
"summary": getRestCallsFailedWarning(80, "virt-handler", durationFiveMinutes),
},
Labels: map[string]string{
severityAlertLabelKey: "critical",
operatorHealthImpactLabelKey: "critical",
},
},
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*/
package alerts
import (
promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
)
func virtOperatorAlerts(namespace string) []promv1.Rule {
return []promv1.Rule{
{
Alert: "VirtOperatorDown",
Expr: intstr.FromString("kubevirt_virt_operator_up == 0"),
For: ptr.To(promv1.Duration("10m")),
Annotations: map[string]string{
"summary": "All virt-operator servers are down.",
},
Labels: map[string]string{
severityAlertLabelKey: "critical",
operatorHealthImpactLabelKey: "critical",
},
},
{
Alert: "LowVirtOperatorCount",
Expr: intstr.FromString("(kubevirt_allocatable_nodes > 1) and (kubevirt_virt_operator_up < 2)"),
For: ptr.To(promv1.Duration("60m")),
Annotations: map[string]string{
"summary": "More than one virt-operator should be running if more than one worker nodes exist.",
},
Labels: map[string]string{
severityAlertLabelKey: "warning",
operatorHealthImpactLabelKey: "warning",
},
},
{
Alert: "VirtOperatorRESTErrorsBurst",
Expr: intstr.FromString(getErrorRatio(namespace, "virt-operator", "(4|5)[0-9][0-9]", 5) + " >= 0.8"),
For: ptr.To(promv1.Duration("5m")),
Annotations: map[string]string{
"summary": getRestCallsFailedWarning(80, "virt-operator", durationFiveMinutes),
},
Labels: map[string]string{
severityAlertLabelKey: "critical",
operatorHealthImpactLabelKey: "critical",
},
},
{
Alert: "LowReadyVirtOperatorsCount",
Expr: intstr.FromString("kubevirt_virt_operator_ready < kubevirt_virt_operator_up"),
For: ptr.To(promv1.Duration("10m")),
Annotations: map[string]string{
"summary": "Some virt-operators are running but not ready.",
},
Labels: map[string]string{
severityAlertLabelKey: "warning",
operatorHealthImpactLabelKey: "warning",
},
},
{
Alert: "NoReadyVirtOperator",
Expr: intstr.FromString("kubevirt_virt_operator_ready == 0"),
For: ptr.To(promv1.Duration("10m")),
Annotations: map[string]string{
"summary": "No ready virt-operator was detected for the last 10 min.",
},
Labels: map[string]string{
severityAlertLabelKey: "critical",
operatorHealthImpactLabelKey: "critical",
},
},
{
Alert: "NoLeadingVirtOperator",
Expr: intstr.FromString("kubevirt_virt_operator_leading == 0"),
For: ptr.To(promv1.Duration("10m")),
Annotations: map[string]string{
"summary": "No leading virt-operator was detected for the last 10 min.",
},
Labels: map[string]string{
severityAlertLabelKey: "critical",
operatorHealthImpactLabelKey: "critical",
},
},
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*/
package recordingrules
import "github.com/rhobs/operator-observability-toolkit/pkg/operatorrules"
func Register(namespace string) error {
return operatorrules.RegisterRecordingRules(
apiRecordingRules,
nodesRecordingRules,
operatorRecordingRules,
virtRecordingRules(namespace),
vmRecordingRules,
vmiRecordingRules,
vmsnapshotRecordingRules,
)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*/
package recordingrules
import (
"fmt"
"github.com/rhobs/operator-observability-toolkit/pkg/operatormetrics"
"github.com/rhobs/operator-observability-toolkit/pkg/operatorrules"
"k8s.io/apimachinery/pkg/util/intstr"
)
func virtRecordingRules(namespace string) []operatorrules.RecordingRule {
return []operatorrules.RecordingRule{
{
MetricsOpts: operatormetrics.MetricOpts{
Name: "kubevirt_virt_api_up",
Help: "The number of virt-api pods that are up.",
},
MetricType: operatormetrics.GaugeType,
Expr: intstr.FromString(
fmt.Sprintf("sum(up{namespace='%s', pod=~'virt-api-.*'}) or vector(0)", namespace),
),
},
{
MetricsOpts: operatormetrics.MetricOpts{
Name: "cluster:kubevirt_virt_controller_pods_running:count",
Help: "The number of virt-controller pods that are running.",
},
MetricType: operatormetrics.GaugeType,
Expr: intstr.FromString(
fmt.Sprintf("count(kube_pod_status_phase{pod=~'virt-controller-.*', namespace='%s', phase='Running'} == 1) or vector(0)", namespace),
),
},
{
MetricsOpts: operatormetrics.MetricOpts{
Name: "kubevirt_virt_controller_up",
Help: "The number of virt-controller pods that are up.",
},
MetricType: operatormetrics.GaugeType,
Expr: intstr.FromString(
fmt.Sprintf("sum(up{pod=~'virt-controller-.*', namespace='%s'}) or vector(0)", namespace),
),
},
{
MetricsOpts: operatormetrics.MetricOpts{
Name: "kubevirt_virt_controller_ready",
Help: "The number of virt-controller pods that are ready.",
},
MetricType: operatormetrics.GaugeType,
Expr: intstr.FromString(
fmt.Sprintf("count(kube_pod_status_ready{pod=~'virt-controller-.*', namespace='%s', condition='true'} + on(pod, namespace) kubevirt_virt_controller_ready_status{namespace='%s'}) or vector(0)", namespace, namespace),
),
},
{
MetricsOpts: operatormetrics.MetricOpts{
Name: "kubevirt_virt_operator_up",
Help: "The number of virt-operator pods that are up.",
},
MetricType: operatormetrics.GaugeType,
Expr: intstr.FromString(
fmt.Sprintf("sum(up{namespace='%s', pod=~'virt-operator-.*'}) or vector(0)", namespace),
),
},
{
MetricsOpts: operatormetrics.MetricOpts{
Name: "kubevirt_virt_operator_ready",
Help: "The number of virt-operator pods that are ready.",
},
MetricType: operatormetrics.GaugeType,
Expr: intstr.FromString(
fmt.Sprintf("sum(kube_pod_status_ready{pod=~'virt-operator-.*', condition='true', namespace='%s'} * on (pod) kubevirt_virt_operator_ready_status{namespace='%s'}) or vector(0)", namespace, namespace),
),
},
{
MetricsOpts: operatormetrics.MetricOpts{
Name: "kubevirt_virt_operator_leading",
Help: "The number of virt-operator pods that are leading.",
},
MetricType: operatormetrics.GaugeType,
Expr: intstr.FromString(
fmt.Sprintf("sum(kubevirt_virt_operator_leading_status{namespace='%s'})", namespace),
),
},
{
MetricsOpts: operatormetrics.MetricOpts{
Name: "kubevirt_virt_handler_up",
Help: "The number of virt-handler pods that are up.",
},
MetricType: operatormetrics.GaugeType,
Expr: intstr.FromString(fmt.Sprintf("sum(up{pod=~'virt-handler-.*', namespace='%s'}) or vector(0)", namespace)),
},
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*/
package rules
import (
promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/rhobs/operator-observability-toolkit/pkg/operatorrules"
"kubevirt.io/kubevirt/pkg/monitoring/rules/alerts"
"kubevirt.io/kubevirt/pkg/monitoring/rules/recordingrules"
)
const (
kubevirtPrometheusRuleName = "prometheus-kubevirt-rules"
prometheusLabelKey = "prometheus.kubevirt.io"
prometheusLabelValue = "true"
k8sAppLabelKey = "k8s-app"
kubevirtLabelValue = "kubevirt"
)
func SetupRules(namespace string) error {
err := recordingrules.Register(namespace)
if err != nil {
return err
}
err = alerts.Register(namespace)
if err != nil {
return err
}
return nil
}
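// BuildPrometheusRule assembles the registered rules into a PrometheusRule
// object. Typical usage (illustrative sketch): register the rules first, then
// build the object for deployment:
//
//	if err := rules.SetupRules("kubevirt"); err != nil {
//		// handle registration error
//	}
//	promRule, err := rules.BuildPrometheusRule("kubevirt")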
func BuildPrometheusRule(namespace string) (*promv1.PrometheusRule, error) {
rules, err := operatorrules.BuildPrometheusRule(
kubevirtPrometheusRuleName,
namespace,
map[string]string{
prometheusLabelKey: prometheusLabelValue,
k8sAppLabelKey: kubevirtLabelValue,
},
)
if err != nil {
return nil, err
}
return rules, nil
}
func ListRecordingRules() []operatorrules.RecordingRule {
return operatorrules.ListRecordingRules()
}
func ListAlerts() []promv1.Rule {
return operatorrules.ListAlerts()
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitter
import (
"fmt"
"kubevirt.io/kubevirt/pkg/network/vmispec"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
)
func validateInterfaceStateValue(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
for idx, iface := range spec.Domain.Devices.Interfaces {
if iface.State != "" &&
iface.State != v1.InterfaceStateAbsent &&
iface.State != v1.InterfaceStateLinkDown &&
iface.State != v1.InterfaceStateLinkUp {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("logical %s interface state value is unsupported: %s", iface.Name, iface.State),
Field: field.Child("domain", "devices", "interfaces").Index(idx).Child("state").String(),
})
}
if iface.SRIOV != nil &&
(iface.State == v1.InterfaceStateLinkDown || iface.State == v1.InterfaceStateLinkUp) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%q interface's state %q is not supported for SR-IOV NICs", iface.Name, iface.State),
Field: field.Child("domain", "devices", "interfaces").Index(idx).Child("state").String(),
})
}
if iface.State == v1.InterfaceStateAbsent && iface.Bridge == nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%q interface's state %q is supported only for bridge binding", iface.Name, iface.State),
Field: field.Child("domain", "devices", "interfaces").Index(idx).Child("state").String(),
})
}
defaultNetwork := vmispec.LookUpDefaultNetwork(spec.Networks)
if iface.State == v1.InterfaceStateAbsent && defaultNetwork != nil && defaultNetwork.Name == iface.Name {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%q interface's state %q is not supported on default networks", iface.Name, iface.State),
Field: field.Child("domain", "devices", "interfaces").Index(idx).Child("state").String(),
})
}
}
return causes
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitter
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/network/link"
"kubevirt.io/kubevirt/pkg/network/vmispec"
)
func validateInterfaceBinding(
fieldPath *field.Path, spec *v1.VirtualMachineInstanceSpec, config clusterConfigChecker,
) []metav1.StatusCause {
var causes []metav1.StatusCause
networksByName := vmispec.IndexNetworkSpecByName(spec.Networks)
for idx, iface := range spec.Domain.Devices.Interfaces {
causes = append(causes, validateInterfaceBindingExists(fieldPath, idx, iface)...)
causes = append(causes, validateMasqueradeBinding(fieldPath, idx, iface, networksByName[iface.Name])...)
causes = append(causes, validateBridgeBinding(fieldPath, idx, iface, networksByName[iface.Name], config)...)
causes = append(causes, validateMacvtapBinding(fieldPath, idx, iface, networksByName[iface.Name], config)...)
causes = append(causes, validatePasstBinding(fieldPath, idx, iface, networksByName[iface.Name], config)...)
}
return causes
}
func validateInterfaceBindingExists(fieldPath *field.Path, idx int, iface v1.Interface) []metav1.StatusCause {
if iface.Binding != nil && hasInterfaceBindingMethod(iface) {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("logical %s interface cannot have both binding plugin and interface binding method", iface.Name),
Field: fieldPath.Child("domain", "devices", "interfaces").Index(idx).Child("binding").String(),
}}
}
return nil
}
func hasInterfaceBindingMethod(iface v1.Interface) bool {
return iface.InterfaceBindingMethod.Bridge != nil ||
iface.InterfaceBindingMethod.DeprecatedSlirp != nil ||
iface.InterfaceBindingMethod.Masquerade != nil ||
iface.InterfaceBindingMethod.SRIOV != nil ||
iface.InterfaceBindingMethod.DeprecatedMacvtap != nil ||
iface.InterfaceBindingMethod.DeprecatedPasst != nil
}
func validateMasqueradeBinding(fieldPath *field.Path, idx int, iface v1.Interface, net v1.Network) []metav1.StatusCause {
var causes []metav1.StatusCause
if iface.Masquerade != nil && net.Pod == nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Masquerade interface only implemented with pod network",
Field: fieldPath.Child("domain", "devices", "interfaces").Index(idx).Child("name").String(),
})
}
if iface.Masquerade != nil && link.IsReserved(iface.MacAddress) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "The requested MAC address is reserved for the in-pod bridge. Please choose another one.",
Field: fieldPath.Child("domain", "devices", "interfaces").Index(idx).Child("macAddress").String(),
})
}
return causes
}
func validateBridgeBinding(
fieldPath *field.Path, idx int, iface v1.Interface, net v1.Network, config clusterConfigChecker,
) []metav1.StatusCause {
if iface.InterfaceBindingMethod.Bridge != nil && net.NetworkSource.Pod != nil && !config.IsBridgeInterfaceOnPodNetworkEnabled() {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Bridge on pod network configuration is not enabled under kubevirt-config",
Field: fieldPath.Child("domain", "devices", "interfaces").Index(idx).Child("name").String(),
}}
}
return nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitter
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
)
func validateMacvtapBinding(
fieldPath *field.Path, idx int, iface v1.Interface, net v1.Network, config clusterConfigChecker,
) []metav1.StatusCause {
var causes []metav1.StatusCause
if iface.InterfaceBindingMethod.DeprecatedMacvtap != nil && !config.MacvtapEnabled() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Macvtap feature gate is not enabled",
Field: fieldPath.Child("domain", "devices", "interfaces").Index(idx).Child("name").String(),
})
}
if iface.InterfaceBindingMethod.DeprecatedMacvtap != nil && net.NetworkSource.Multus == nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Macvtap interface only implemented with Multus network",
Field: fieldPath.Child("domain", "devices", "interfaces").Index(idx).Child("name").String(),
})
}
return causes
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitter
import (
"fmt"
"net"
"regexp"
"kubevirt.io/kubevirt/pkg/network/link"
"kubevirt.io/kubevirt/pkg/network/vmispec"
hwutil "kubevirt.io/kubevirt/pkg/util/hardware"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8svalidation "k8s.io/apimachinery/pkg/util/validation"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
)
func validateNetworksAssignedToInterfaces(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
const nameOfTypeNotFoundMessagePattern = "%s '%s' not found."
interfaceSet := vmispec.IndexInterfaceSpecByName(spec.Domain.Devices.Interfaces)
for i, network := range spec.Networks {
if _, exists := interfaceSet[network.Name]; !exists {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: fmt.Sprintf(nameOfTypeNotFoundMessagePattern, field.Child("networks").Index(i).Child("name").String(), network.Name),
Field: field.Child("networks").Index(i).Child("name").String(),
})
}
}
return causes
}
func validateInterfacesAssignedToNetworks(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
const nameOfTypeNotFoundMessagePattern = "%s '%s' not found."
networkSet := vmispec.IndexNetworkSpecByName(spec.Networks)
for idx, iface := range spec.Domain.Devices.Interfaces {
if _, exists := networkSet[iface.Name]; !exists {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf(
nameOfTypeNotFoundMessagePattern,
field.Child("domain", "devices", "interfaces").Index(idx).Child("name").String(),
iface.Name,
),
Field: field.Child("domain", "devices", "interfaces").Index(idx).Child("name").String(),
})
}
}
return causes
}
func validateNetworkNameUnique(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
networkSet := map[string]struct{}{}
for i, network := range spec.Networks {
if _, exists := networkSet[network.Name]; exists {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueDuplicate,
Message: fmt.Sprintf("Network with name %q already exists, every network must have a unique name", network.Name),
Field: field.Child("networks").Index(i).Child("name").String(),
})
}
networkSet[network.Name] = struct{}{}
}
return causes
}
func validateInterfaceNameUnique(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
ifaceSet := map[string]struct{}{}
for idx, iface := range spec.Domain.Devices.Interfaces {
if _, exists := ifaceSet[iface.Name]; exists {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueDuplicate,
Message: "Only one interface can be connected to one specific network",
Field: field.Child("domain", "devices", "interfaces").Index(idx).Child("name").String(),
})
}
ifaceSet[iface.Name] = struct{}{}
}
return causes
}
func validateInterfacesFields(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
networksByName := vmispec.IndexNetworkSpecByName(spec.Networks)
for idx, iface := range spec.Domain.Devices.Interfaces {
causes = append(causes, validateInterfaceNameFormat(field, idx, iface)...)
causes = append(causes, validateInterfaceModel(field, idx, iface)...)
causes = append(causes, validateMacAddress(field, idx, iface)...)
causes = append(causes, validatePciAddress(field, idx, iface)...)
causes = append(causes, validatePortConfiguration(field, idx, iface, networksByName[iface.Name])...)
causes = append(causes, validateDHCPOptions(field, idx, iface)...)
}
return causes
}
func validateInterfaceNameFormat(field *k8sfield.Path, idx int, iface v1.Interface) []metav1.StatusCause {
isValid := regexp.MustCompile(`^[A-Za-z0-9-_]+$`).MatchString
if !isValid(iface.Name) {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Network interface name can only contain alphabetical characters, numbers, dashes (-) or underscores (_)",
Field: field.Child("domain", "devices", "interfaces").Index(idx).Child("name").String(),
}}
}
return nil
}
var validInterfaceModels = map[string]struct{}{
"e1000": {},
"e1000e": {},
"igb": {},
"ne2k_pci": {},
"pcnet": {},
"rtl8139": {},
v1.VirtIO: {},
}
func validateInterfaceModel(field *k8sfield.Path, idx int, iface v1.Interface) []metav1.StatusCause {
if iface.Model != "" {
if _, exists := validInterfaceModels[iface.Model]; !exists {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: fmt.Sprintf(
"interface %s uses model %s that is not supported.",
field.Child("domain", "devices", "interfaces").Index(idx).Child("name").String(),
iface.Model,
),
Field: field.Child("domain", "devices", "interfaces").Index(idx).Child("model").String(),
}}
}
}
return nil
}
func validateMacAddress(field *k8sfield.Path, idx int, iface v1.Interface) []metav1.StatusCause {
var causes []metav1.StatusCause
if err := link.ValidateMacAddress(iface.MacAddress); err != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf(
"interface %s has %s.",
field.Child("domain", "devices", "interfaces").Index(idx).Child("name").String(),
err.Error(),
),
Field: field.Child("domain", "devices", "interfaces").Index(idx).Child("macAddress").String(),
})
}
return causes
}
func validatePciAddress(field *k8sfield.Path, idx int, iface v1.Interface) []metav1.StatusCause {
if iface.PciAddress != "" {
_, err := hwutil.ParsePciAddress(iface.PciAddress)
if err != nil {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf(
"interface %s has malformed PCI address (%s).",
field.Child("domain", "devices", "interfaces").Index(idx).Child("name").String(),
iface.PciAddress,
),
Field: field.Child("domain", "devices", "interfaces").Index(idx).Child("pciAddress").String(),
}}
}
}
return nil
}
func validatePortConfiguration(field *k8sfield.Path, idx int, iface v1.Interface, network v1.Network) []metav1.StatusCause {
var causes []metav1.StatusCause
if network.Pod != nil && iface.Ports != nil {
causes = append(causes, validateForwardPortName(field, idx, iface.Ports)...)
for portIdx, forwardPort := range iface.Ports {
causes = append(causes, validateForwardPortNonZero(field, idx, forwardPort, portIdx)...)
causes = append(causes, validateForwardPortInRange(field, idx, forwardPort, portIdx)...)
causes = append(causes, validateForwardPortProtocol(field, idx, forwardPort, portIdx)...)
}
}
return causes
}
func validateForwardPortName(field *k8sfield.Path, idx int, ports []v1.Port) []metav1.StatusCause {
var causes []metav1.StatusCause
portForwardMap := map[string]struct{}{}
for portIdx, forwardPort := range ports {
if forwardPort.Name == "" {
continue
}
if _, ok := portForwardMap[forwardPort.Name]; ok {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueDuplicate,
Message: fmt.Sprintf("Duplicate name of the port: %s", forwardPort.Name),
Field: field.Child("domain", "devices", "interfaces").Index(idx).Child("ports").Index(portIdx).Child("name").String(),
})
}
if msgs := k8svalidation.IsValidPortName(forwardPort.Name); len(msgs) != 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Invalid name of the port: %s", forwardPort.Name),
Field: field.Child("domain", "devices", "interfaces").Index(idx).Child("ports").Index(portIdx).Child("name").String(),
})
}
portForwardMap[forwardPort.Name] = struct{}{}
}
return causes
}
func validateForwardPortProtocol(field *k8sfield.Path, idx int, forwardPort v1.Port, portIdx int) (causes []metav1.StatusCause) {
if forwardPort.Protocol != "" {
if forwardPort.Protocol != "TCP" && forwardPort.Protocol != "UDP" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Unknown protocol, only TCP or UDP allowed",
Field: field.Child("domain", "devices", "interfaces").Index(idx).Child("ports").Index(portIdx).Child("protocol").String(),
})
}
}
return causes
}
func validateForwardPortInRange(field *k8sfield.Path, idx int, forwardPort v1.Port, portIdx int) (causes []metav1.StatusCause) {
if forwardPort.Port < 0 || forwardPort.Port > 65535 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Port field must be in range 0 < x < 65536.",
Field: field.Child("domain", "devices", "interfaces").Index(idx).Child("ports").Index(portIdx).String(),
})
}
return causes
}
func validateForwardPortNonZero(field *k8sfield.Path, idx int, forwardPort v1.Port, portIdx int) (causes []metav1.StatusCause) {
if forwardPort.Port == 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: "Port field is mandatory.",
Field: field.Child("domain", "devices", "interfaces").Index(idx).Child("ports").Index(portIdx).String(),
})
}
return causes
}
func validateDHCPOptions(field *k8sfield.Path, idx int, iface v1.Interface) []metav1.StatusCause {
var causes []metav1.StatusCause
if iface.DHCPOptions != nil {
causes = append(causes, validateDHCPExtraOptions(field, iface)...)
causes = append(causes, validateDHCPNTPServersAreValidIPv4Addresses(field, iface, idx)...)
}
return causes
}
func validateDHCPExtraOptions(field *k8sfield.Path, iface v1.Interface) []metav1.StatusCause {
var causes []metav1.StatusCause
privateOptions := iface.DHCPOptions.PrivateOptions
if countUniqueDHCPPrivateOptions(privateOptions) < len(privateOptions) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Found Duplicates: you have provided duplicate DHCPPrivateOptions",
Field: field.String(),
})
}
for _, DHCPPrivateOption := range privateOptions {
causes = append(causes, validateDHCPPrivateOptionsWithinRange(field, DHCPPrivateOption)...)
}
return causes
}
func validateDHCPNTPServersAreValidIPv4Addresses(field *k8sfield.Path, iface v1.Interface, idx int) (causes []metav1.StatusCause) {
if iface.DHCPOptions != nil {
for index, ip := range iface.DHCPOptions.NTPServers {
if net.ParseIP(ip).To4() == nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "NTP servers must be a list of valid IPv4 addresses.",
Field: field.Child("domain", "devices", "interfaces").Index(idx).Child("dhcpOptions", "ntpServers").Index(index).String(),
})
}
}
}
return causes
}
func validateDHCPPrivateOptionsWithinRange(field *k8sfield.Path, dhcpPrivateOption v1.DHCPPrivateOptions) (causes []metav1.StatusCause) {
if dhcpPrivateOption.Option < 224 || dhcpPrivateOption.Option > 254 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "provided DHCPPrivateOptions are out of range, must be in range 224 to 254",
Field: field.String(),
})
}
return causes
}
func countUniqueDHCPPrivateOptions(privateOptions []v1.DHCPPrivateOptions) int {
optionSet := map[int]struct{}{}
for _, DHCPPrivateOption := range privateOptions {
optionSet[DHCPPrivateOption.Option] = struct{}{}
}
return len(optionSet)
}
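// Illustrative sketch (for documentation only, not part of the validation API):
// countUniqueDHCPPrivateOptions keys only on the Option code, so two entries with
// the same code but different values still count as one unique option and trigger
// the duplicate cause above.
//
//	opts := []v1.DHCPPrivateOptions{
//		{Option: 240, Value: "a"},
//		{Option: 240, Value: "b"}, // same code, different value
//		{Option: 241, Value: "c"},
//	}
//	countUniqueDHCPPrivateOptions(opts) // returns 2, while len(opts) is 3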
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitter
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/network/vmispec"
)
func validateSinglePodNetwork(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
podNetworks := vmispec.FilterNetworksSpec(spec.Networks, func(n v1.Network) bool {
return n.Pod != nil
})
if len(podNetworks) > 1 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueDuplicate,
Message: fmt.Sprintf("more than one interface is connected to a pod network in %s", field.Child("interfaces").String()),
Field: field.Child("interfaces").String(),
})
}
multusDefaultNetworks := vmispec.FilterNetworksSpec(spec.Networks, func(n v1.Network) bool {
return n.Multus != nil && n.Multus.Default
})
if len(multusDefaultNetworks) > 1 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Multus CNI should only have one default network",
Field: field.Child("networks").String(),
})
}
if len(podNetworks) > 0 && len(multusDefaultNetworks) > 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Pod network cannot be defined when Multus default network is defined",
Field: field.Child("networks").String(),
})
}
return causes
}
func validateSingleNetworkSource(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
for idx, net := range spec.Networks {
if net.Pod == nil && net.Multus == nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: "should have a network type",
Field: field.Child("networks").Index(idx).String(),
})
} else if net.Pod != nil && net.Multus != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: "should have only one network type",
Field: field.Child("networks").Index(idx).String(),
})
}
}
return causes
}
func validateMultusNetworkSource(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
for idx, net := range spec.Networks {
if net.Multus != nil && net.Multus.NetworkName == "" {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueRequired,
Message: "CNI delegating plugin must have a networkName",
Field: field.Child("networks").Index(idx).String(),
}}
}
}
return nil
}
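// Illustrative sketch (hypothetical spec, for documentation only): a network that
// sets both Pod and Multus sources fails validateSingleNetworkSource, while one
// that sets neither yields a "should have a network type" cause.
//
//	spec.Networks = []v1.Network{{
//		Name: "default",
//		NetworkSource: v1.NetworkSource{
//			Pod:    &v1.PodNetwork{},
//			Multus: &v1.MultusNetwork{NetworkName: "my-net"},
//		},
//	}} // -> "should have only one network type"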
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitter
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
)
func validatePasstBinding(
fieldPath *field.Path, idx int, iface v1.Interface, net v1.Network, config clusterConfigChecker,
) []metav1.StatusCause {
var causes []metav1.StatusCause
if iface.InterfaceBindingMethod.DeprecatedPasst != nil && !config.PasstEnabled() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Passt feature gate is not enabled",
Field: fieldPath.Child("domain", "devices", "interfaces").Index(idx).Child("name").String(),
})
}
if iface.DeprecatedPasst != nil && net.Pod == nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Passt interface only implemented with pod network",
Field: fieldPath.Child("domain", "devices", "interfaces").Index(idx).Child("name").String(),
})
}
return causes
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitter
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
)
func validateCreationSlirpBinding(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
for idx, ifaceSpec := range spec.Domain.Devices.Interfaces {
if ifaceSpec.DeprecatedSlirp != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Slirp interface support has been discontinued since v1.3",
Field: field.Child("domain", "devices", "interfaces").Index(idx).Child("slirp").String(),
})
}
}
return causes
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitter
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
)
type clusterConfigChecker interface {
IsBridgeInterfaceOnPodNetworkEnabled() bool
MacvtapEnabled() bool
PasstEnabled() bool
}
type Validator struct {
field *k8sfield.Path
vmiSpec *v1.VirtualMachineInstanceSpec
configChecker clusterConfigChecker
}
func NewValidator(field *k8sfield.Path, vmiSpec *v1.VirtualMachineInstanceSpec, configChecker clusterConfigChecker) *Validator {
return &Validator{
field: field,
vmiSpec: vmiSpec,
configChecker: configChecker,
}
}
func (v Validator) Validate() []metav1.StatusCause {
var causes []metav1.StatusCause
causes = append(causes, validateSinglePodNetwork(v.field, v.vmiSpec)...)
causes = append(causes, validateSingleNetworkSource(v.field, v.vmiSpec)...)
causes = append(causes, validateMultusNetworkSource(v.field, v.vmiSpec)...)
causes = append(causes, validateInterfaceStateValue(v.field, v.vmiSpec)...)
causes = append(causes, validateInterfaceBinding(v.field, v.vmiSpec, v.configChecker)...)
causes = append(causes, validateNetworkNameUnique(v.field, v.vmiSpec)...)
causes = append(causes, validateNetworksAssignedToInterfaces(v.field, v.vmiSpec)...)
causes = append(causes, validateInterfaceNameUnique(v.field, v.vmiSpec)...)
causes = append(causes, validateInterfacesAssignedToNetworks(v.field, v.vmiSpec)...)
causes = append(causes, validateInterfacesFields(v.field, v.vmiSpec)...)
return causes
}
func (v Validator) ValidateCreation() []metav1.StatusCause {
var causes []metav1.StatusCause
causes = append(causes, validateCreationSlirpBinding(v.field, v.vmiSpec)...)
return causes
}
func ValidateCreation(field *k8sfield.Path, vmiSpec *v1.VirtualMachineInstanceSpec, clusterCfg clusterConfigChecker) []metav1.StatusCause {
networkValidator := NewValidator(field, vmiSpec, clusterCfg)
return networkValidator.ValidateCreation()
}
func Validate(field *k8sfield.Path, vmiSpec *v1.VirtualMachineInstanceSpec, clusterCfg clusterConfigChecker) []metav1.StatusCause {
netValidator := NewValidator(field, vmiSpec, clusterCfg)
var statusCauses []metav1.StatusCause
statusCauses = append(statusCauses, netValidator.ValidateCreation()...)
statusCauses = append(statusCauses, netValidator.Validate()...)
return statusCauses
}
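// Illustrative usage sketch (assumes a caller-supplied object that satisfies the
// unexported clusterConfigChecker interface, e.g. the cluster config used by the
// admission webhooks):
//
//	causes := admitter.Validate(
//		k8sfield.NewPath("spec"),
//		&vmi.Spec,
//		clusterConfig, // any type implementing the checker methods
//	)
//	if len(causes) > 0 {
//		// reject the admission request with the collected causes
//	}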
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package cache
import (
"encoding/json"
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
dutils "kubevirt.io/kubevirt/pkg/ephemeral-disk-utils"
kfs "kubevirt.io/kubevirt/pkg/os/fs"
)
type Cache struct {
path string
fs cacheFS
}
type cacheFS interface {
Stat(name string) (os.FileInfo, error)
MkdirAll(path string, perm os.FileMode) error
RemoveAll(path string) error
ReadFile(filename string) ([]byte, error)
WriteFile(filename string, data []byte, perm fs.FileMode) error
}
type CacheCreator struct{}
func (CacheCreator) New(filePath string) *Cache {
return NewCustomCache(filePath, kfs.New())
}
func NewCustomCache(path string, fs cacheFS) *Cache {
return &Cache{path, fs}
}
func (c Cache) Entry(path string) (Cache, error) {
fileInfo, err := c.fs.Stat(c.path)
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
return Cache{}, fmt.Errorf("unable to define entry: %v", err)
}
} else if !fileInfo.IsDir() {
return Cache{}, fmt.Errorf("unable to define entry: parent cache has an existing store")
}
return Cache{
path: filepath.Join(c.path, path),
fs: c.fs,
}, nil
}
func (c Cache) Read(data interface{}) (interface{}, error) {
err := readFromCachedFile(c.fs, data, c.path)
return data, err
}
func (c Cache) Write(data interface{}) error {
return writeToCachedFile(c.fs, data, c.path)
}
func (c Cache) Delete() error {
return c.fs.RemoveAll(c.path)
}
type cacheCreator interface {
New(filePath string) *Cache
}
func writeToCachedFile(fs cacheFS, obj interface{}, fileName string) error {
if err := fs.MkdirAll(filepath.Dir(fileName), 0750); err != nil {
return err
}
buf, err := json.MarshalIndent(&obj, "", " ")
if err != nil {
return fmt.Errorf("error marshaling cached object: %v", err)
}
err = fs.WriteFile(fileName, buf, 0604)
if err != nil {
return fmt.Errorf("error writing cached object: %v", err)
}
return dutils.DefaultOwnershipManager.UnsafeSetFileOwnership(fileName)
}
func readFromCachedFile(fs cacheFS, obj interface{}, fileName string) error {
buf, err := fs.ReadFile(fileName)
if err != nil {
return err
}
err = json.Unmarshal(buf, &obj)
if err != nil {
return fmt.Errorf("error unmarshaling cached object: %v", err)
}
return nil
}
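// Illustrative round-trip sketch (hypothetical path and payload): entries are
// stored as indented JSON files under the parent cache directory.
//
//	c := CacheCreator{}.New("/var/run/kubevirt-private/some-cache")
//	entry, err := c.Entry("item.json")
//	if err == nil {
//		_ = entry.Write(map[string]string{"key": "value"}) // creates parent dirs, writes JSON
//		data := map[string]string{}
//		_, _ = entry.Read(&data) // unmarshals the file back into data
//	}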
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package cache
import (
"fmt"
"net"
"path/filepath"
"github.com/vishvananda/netlink"
"kubevirt.io/kubevirt/pkg/util"
)
type DHCPInterfaceCache struct {
cache *Cache
}
func ReadDHCPInterfaceCache(c cacheCreator, pid, ifaceName string) (*DHCPConfig, error) {
dhcpCache, err := NewDHCPInterfaceCache(c, pid).IfaceEntry(ifaceName)
if err != nil {
return nil, err
}
return dhcpCache.Read()
}
func WriteDHCPInterfaceCache(c cacheCreator, pid, ifaceName string, dhcpConfig *DHCPConfig) error {
dhcpCache, err := NewDHCPInterfaceCache(c, pid).IfaceEntry(ifaceName)
if err != nil {
return err
}
return dhcpCache.Write(dhcpConfig)
}
func DeleteDHCPInterfaceCache(c cacheCreator, pid, ifaceName string) error {
dhcpCache, err := NewDHCPInterfaceCache(c, pid).IfaceEntry(ifaceName)
if err != nil {
return err
}
return dhcpCache.Delete()
}
func NewDHCPInterfaceCache(creator cacheCreator, pid string) DHCPInterfaceCache {
podRootFilesystemPath := fmt.Sprintf("/proc/%s/root", pid)
return DHCPInterfaceCache{creator.New(filepath.Join(podRootFilesystemPath, util.VirtPrivateDir))}
}
func (d DHCPInterfaceCache) IfaceEntry(ifaceName string) (DHCPInterfaceCache, error) {
const dhcpConfigCacheFileFormat = "vif-cache-%s.json"
cacheFileName := fmt.Sprintf(dhcpConfigCacheFileFormat, ifaceName)
cache, err := d.cache.Entry(cacheFileName)
if err != nil {
return DHCPInterfaceCache{}, err
}
return DHCPInterfaceCache{&cache}, nil
}
func (d DHCPInterfaceCache) Read() (*DHCPConfig, error) {
cachedIface := &DHCPConfig{}
_, err := d.cache.Read(cachedIface)
return cachedIface, err
}
func (d DHCPInterfaceCache) Write(dhcpConfig *DHCPConfig) error {
return d.cache.Write(dhcpConfig)
}
func (d DHCPInterfaceCache) Delete() error {
return d.cache.Delete()
}
type DHCPConfig struct {
Name string
IP netlink.Addr
IPv6 netlink.Addr
MAC net.HardwareAddr
AdvertisingIPAddr net.IP
AdvertisingIPv6Addr net.IP
Routes *[]netlink.Route
Mtu uint16
IPAMDisabled bool
Gateway net.IP
Subdomain string
}
func (d DHCPConfig) String() string {
return fmt.Sprintf(
"DHCPConfig: { Name: %s, IPv4: %s, IPv6: %s, MAC: %s, AdvertisingIPAddr: %s, MTU: %d, Gateway: %s, IPAMDisabled: %t, Routes: %v}",
d.Name,
d.IP,
d.IPv6,
d.MAC,
d.AdvertisingIPAddr,
d.Mtu,
d.Gateway,
d.IPAMDisabled,
d.Routes,
)
}
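// Illustrative note (derived from the code above): for pid "1234" and interface
// "eth0", the DHCP config is cached at
// /proc/1234/root/<util.VirtPrivateDir>/vif-cache-eth0.json, i.e. inside the
// virt-launcher pod's root filesystem.
//
//	cfg, err := ReadDHCPInterfaceCache(CacheCreator{}, "1234", "eth0")
//	if err == nil {
//		fmt.Println(cfg.String())
//	}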
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package cache
import (
"path/filepath"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/util"
)
type PodIfaceState int
const (
PodIfaceNetworkPreparationPending PodIfaceState = iota
PodIfaceNetworkPreparationStarted
PodIfaceNetworkPreparationFinished
)
type PodIfaceCacheData struct {
Iface *v1.Interface `json:"iface,omitempty"`
PodIP string `json:"podIP,omitempty"`
PodIPs []string `json:"podIPs,omitempty"`
State PodIfaceState `json:"networkState,omitempty"`
}
type PodInterfaceCache struct {
cache *Cache
}
func ReadPodInterfaceCache(c cacheCreator, uid, ifaceName string) (*PodIfaceCacheData, error) {
podCache, err := NewPodInterfaceCache(c, uid).IfaceEntry(ifaceName)
if err != nil {
return nil, err
}
return podCache.Read()
}
func WritePodInterfaceCache(c cacheCreator, uid, ifaceName string, cacheInterface *PodIfaceCacheData) error {
podCache, err := NewPodInterfaceCache(c, uid).IfaceEntry(ifaceName)
if err != nil {
return err
}
return podCache.Write(cacheInterface)
}
func DeletePodInterfaceCache(c cacheCreator, uid, ifaceName string) error {
podCache, err := NewPodInterfaceCache(c, uid).IfaceEntry(ifaceName)
if err != nil {
return err
}
return podCache.Remove()
}
func NewPodInterfaceCache(creator cacheCreator, uid string) PodInterfaceCache {
const podIfaceCacheDirName = "network-info-cache"
return PodInterfaceCache{creator.New(filepath.Join(util.VirtPrivateDir, podIfaceCacheDirName, uid))}
}
func (p PodInterfaceCache) IfaceEntry(ifaceName string) (PodInterfaceCache, error) {
cache, err := p.cache.Entry(ifaceName)
if err != nil {
return PodInterfaceCache{}, err
}
return PodInterfaceCache{&cache}, nil
}
func (p PodInterfaceCache) Read() (*PodIfaceCacheData, error) {
iface := &PodIfaceCacheData{}
_, err := p.cache.Read(iface)
return iface, err
}
func (p PodInterfaceCache) Write(cacheInterface *PodIfaceCacheData) error {
return p.cache.Write(cacheInterface)
}
func (p PodInterfaceCache) Remove() error {
return p.cache.Delete()
}
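// Illustrative usage sketch (hypothetical uid and interface name):
//
//	podCache := NewPodInterfaceCache(CacheCreator{}, "5d307ca9-uid")
//	ifaceCache, err := podCache.IfaceEntry("eth0")
//	if err == nil {
//		_ = ifaceCache.Write(&PodIfaceCacheData{
//			PodIP: "10.0.0.5",
//			State: PodIfaceNetworkPreparationStarted,
//		})
//		data, _ := ifaceCache.Read()
//		_ = data.PodIP // "10.0.0.5"
//	}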
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package server
import (
"bytes"
"encoding/binary"
"fmt"
"net"
"os"
"regexp"
"strings"
"time"
dhcp "github.com/krolaw/dhcp4"
"github.com/vishvananda/netlink"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/network/dns"
)
const (
infiniteLease = 999 * 24 * time.Hour
errorSearchDomainNotValid = "Search domain is not valid"
errorSearchDomainTooLong = "Search domains length exceeded allowable size"
errorNTPConfiguration = "Could not parse NTP server as IPv4 address: %s"
)
// Simple domain validation regex, declared at package level so it is compiled only once.
// Note this requires that unicode domains be presented in their ASCII format.
var searchDomainValidationRegex = regexp.MustCompile(`^(?:[_a-z0-9](?:[_a-z0-9-]{0,61}[a-z0-9])?\.)*(?:[a-z](?:[a-z0-9-]{0,61}[a-z0-9])?)?$`)
func SingleClientDHCPServer(
clientMAC net.HardwareAddr,
clientIP net.IP,
clientMask net.IPMask,
serverIface string,
serverIP net.IP,
routerIP net.IP,
dnsIPs [][]byte,
routes *[]netlink.Route,
searchDomains []string,
mtu uint16,
customDHCPOptions *v1.DHCPOptions) error {
log.Log.Info("Starting SingleClientDHCPServer")
hostname, err := os.Hostname()
if err != nil {
return fmt.Errorf("reading the pods hostname failed: %v", err)
}
options, err := prepareDHCPOptions(clientMask, routerIP, dnsIPs, routes, searchDomains, mtu, hostname, customDHCPOptions)
if err != nil {
return err
}
handler := &DHCPHandler{
clientIP: clientIP,
clientMAC: clientMAC,
serverIP: serverIP.To4(),
leaseDuration: infiniteLease,
options: options,
}
l, err := NewUDP4FilterListener(serverIface, ":67")
if err != nil {
return err
}
defer closeDHCPServerIgnoringError(l)
err = dhcp.Serve(l, handler)
if err != nil {
return err
}
return nil
}
func closeDHCPServerIgnoringError(l ServeIfConn) {
if err := l.Close(); err != nil {
log.Log.Warningf("failed to close DHCP server connection: %v", err)
}
}
func prepareDHCPOptions(
clientMask net.IPMask,
routerIP net.IP,
dnsIPs [][]byte,
routes *[]netlink.Route,
searchDomains []string,
mtu uint16,
hostname string,
customDHCPOptions *v1.DHCPOptions) (dhcp.Options, error) {
mtuArray := make([]byte, 2)
binary.BigEndian.PutUint16(mtuArray, mtu)
dhcpOptions := dhcp.Options{
dhcp.OptionDomainNameServer: bytes.Join(dnsIPs, nil),
dhcp.OptionInterfaceMTU: mtuArray,
}
if len(clientMask) != 0 {
dhcpOptions[dhcp.OptionSubnetMask] = clientMask
}
if len(routerIP) != 0 {
dhcpOptions[dhcp.OptionRouter] = routerIP.To4()
}
netRoutes := formClasslessRoutes(routes)
if len(netRoutes) != 0 {
dhcpOptions[dhcp.OptionClasslessRouteFormat] = netRoutes
}
searchDomainBytes, err := convertSearchDomainsToBytes(searchDomains)
if err != nil {
return nil, err
}
if searchDomainBytes != nil {
dhcpOptions[dhcp.OptionDomainSearch] = searchDomainBytes
}
dhcpOptions[dhcp.OptionHostName] = []byte(hostname)
// Windows will ask for the domain name and use it for DNS resolution
domainName := dns.GetDomainName(searchDomains)
if len(domainName) > 0 {
dhcpOptions[dhcp.OptionDomainName] = []byte(domainName)
}
if customDHCPOptions != nil {
if customDHCPOptions.TFTPServerName != "" {
log.Log.Infof("Setting dhcp option tftp server name to %s", customDHCPOptions.TFTPServerName)
dhcpOptions[dhcp.OptionTFTPServerName] = []byte(customDHCPOptions.TFTPServerName)
}
if customDHCPOptions.BootFileName != "" {
log.Log.Infof("Setting dhcp option boot file name to %s", customDHCPOptions.BootFileName)
dhcpOptions[dhcp.OptionBootFileName] = []byte(customDHCPOptions.BootFileName)
}
if len(customDHCPOptions.NTPServers) > 0 {
log.Log.Infof("Setting dhcp option NTP server name to %s", customDHCPOptions.NTPServers)
ntpServers := [][]byte{}
for _, server := range customDHCPOptions.NTPServers {
ip := net.ParseIP(server).To4()
if ip == nil {
return nil, fmt.Errorf(errorNTPConfiguration, server)
}
ntpServers = append(ntpServers, []byte(ip))
}
dhcpOptions[dhcp.OptionNetworkTimeProtocolServers] = bytes.Join(ntpServers, nil)
}
if customDHCPOptions.PrivateOptions != nil {
for _, privateOptions := range customDHCPOptions.PrivateOptions {
if privateOptions.Option >= 224 && privateOptions.Option <= 254 {
dhcpOptions[dhcp.OptionCode(byte(privateOptions.Option))] = []byte(privateOptions.Value)
}
}
}
}
return dhcpOptions, nil
}
type DHCPHandler struct {
serverIP net.IP
clientIP net.IP
clientMAC net.HardwareAddr
leaseDuration time.Duration
options dhcp.Options
}
func (h *DHCPHandler) ServeDHCP(p dhcp.Packet, msgType dhcp.MessageType, _ dhcp.Options) (d dhcp.Packet) {
log.Log.V(4).Info("Serving a new request")
if len(h.clientMAC) != 0 {
if mac := p.CHAddr(); !bytes.Equal(mac, h.clientMAC) {
log.Log.V(4).Info("The request is not from our client")
return nil // Is not our client
}
}
switch msgType {
case dhcp.Discover:
log.Log.V(4).Info("The request has message type DISCOVER")
return dhcp.ReplyPacket(p, dhcp.Offer, h.serverIP, h.clientIP, h.leaseDuration,
h.options.SelectOrderOrAll(nil))
case dhcp.Request:
log.Log.V(4).Info("The request has message type REQUEST")
return dhcp.ReplyPacket(p, dhcp.ACK, h.serverIP, h.clientIP, h.leaseDuration,
h.options.SelectOrderOrAll(nil))
default:
log.Log.V(4).Info("The request has unhandled message type")
return nil // Ignored message type
}
}
func sortRoutes(routes []netlink.Route) []netlink.Route {
// Default route must come last, otherwise it may not get applied
// because there is no route to its gateway yet
var sortedRoutes []netlink.Route
var defaultRoutes []netlink.Route
for _, route := range routes {
if route.Dst == nil {
defaultRoutes = append(defaultRoutes, route)
continue
}
sortedRoutes = append(sortedRoutes, route)
}
sortedRoutes = append(sortedRoutes, defaultRoutes...)
return sortedRoutes
}
func formClasslessRoutes(routes *[]netlink.Route) (formattedRoutes []byte) {
// See RFC 3442 for additional information
// (https://tools.ietf.org/html/rfc3442)
// For example:
// routes:
// 10.0.0.0/8 , gateway: 10.1.2.3
// 192.168.1/24, gateway: 192.168.2.3
// would result in the following structure:
// []byte{8, 10, 10, 1, 2, 3, 24, 192, 168, 1, 192, 168, 2, 3}
if routes == nil {
return []byte{}
}
sortedRoutes := sortRoutes(*routes)
for _, route := range sortedRoutes {
if route.Dst == nil {
route.Dst = &net.IPNet{
IP: net.IPv4(0, 0, 0, 0),
Mask: net.CIDRMask(0, 32),
}
}
ip := route.Dst.IP.To4()
width, _ := route.Dst.Mask.Size()
octets := 0
if width > 0 {
octets = (width-1)/8 + 1
}
newRoute := append([]byte{byte(width)}, ip[0:octets]...)
gateway := route.Gw.To4()
if gateway == nil {
gateway = []byte{0, 0, 0, 0}
}
newRoute = append(newRoute, gateway...)
formattedRoutes = append(formattedRoutes, newRoute...)
}
return
}
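// Worked example (derived from the RFC 3442 layout above): a default route via
// 10.1.2.1 has a nil Dst, so it is rewritten to 0.0.0.0/0 and encoded with a
// zero prefix width and no destination octets, followed by the gateway:
//
//	routes := []netlink.Route{{Gw: net.IPv4(10, 1, 2, 1)}}
//	formClasslessRoutes(&routes) // []byte{0, 10, 1, 2, 1}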
func convertSearchDomainsToBytes(searchDomainStrings []string) ([]byte, error) {
/*
https://tools.ietf.org/html/rfc3397
https://tools.ietf.org/html/rfc1035
Option for search domain string is covered by RFC3397, option contains
RFC1035 domain data.
Convert domain strings to a DNS RFC1035 section 3.1 compatible byte slice.
This is basically just splitting the domain on dot and prepending each
substring with a byte that indicates its length. Then we join and null terminate.
"example.com" becomes:
[]byte{7, 'e', 'x', 'a', 'm', 'p', 'l', 'e', 3, 'c', 'o', 'm', 0}
Note that there is a compression scheme described in section 4.1.4 where pointers
can be used to avoid duplication. This is optional for servers, and resolv.conf
limits max search domain length anyway, so we can skip compression.
*/
var searchDomainBytes []byte
for _, domain := range searchDomainStrings {
if isValidSearchDomain(domain) {
labels := strings.Split(domain, ".")
for _, label := range labels {
searchDomainBytes = append(searchDomainBytes, byte(len(label)))
searchDomainBytes = append(searchDomainBytes, []byte(label)...)
}
searchDomainBytes = append(searchDomainBytes, 0)
} else {
return searchDomainBytes, fmt.Errorf("%s: '%s'", errorSearchDomainNotValid, domain)
}
}
// ensure we haven't gone past length limit of DHCP option data
if len(searchDomainBytes) > 255 {
return searchDomainBytes, fmt.Errorf("%s: was %d long", errorSearchDomainTooLong, len(searchDomainBytes))
}
return searchDomainBytes, nil
}
func isValidSearchDomain(domain string) bool {
if len(domain) > 253 {
return false
}
return searchDomainValidationRegex.MatchString(domain)
}
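// Illustrative examples (derived from the regex and length limit above):
//
//	isValidSearchDomain("cluster.local")          // true
//	isValidSearchDomain("Cluster.Local")          // false: upper case is rejected
//	isValidSearchDomain(strings.Repeat("a", 254)) // false: longer than 253 bytes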
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package server
import (
"context"
"net"
"syscall"
dhcpConn "github.com/krolaw/dhcp4/conn"
"golang.org/x/net/ipv4"
)
// NewUDP4FilterListener creates a listener on all interfaces and then filters out packets that were not received on interfaceName
func NewUDP4FilterListener(interfaceName, laddr string) (c ServeIfConn, e error) {
iface, err := net.InterfaceByName(interfaceName)
if err != nil {
return nil, err
}
lc := CreateListenConfig()
l, err := lc.ListenPacket(context.Background(), "udp4", laddr)
if err != nil {
return nil, err
}
defer func() {
if e != nil {
closeDHCPServerIgnoringError(l)
}
}()
p := ipv4.NewPacketConn(l)
if err := p.SetControlMessage(ipv4.FlagInterface, true); err != nil {
return nil, err
}
return dhcpConn.NewServeIf(iface.Index, p), nil
}
func CreateListenConfig() net.ListenConfig {
return net.ListenConfig{
Control: func(network, address string, c syscall.RawConn) error {
var opErr error
err := c.Control(func(fd uintptr) {
opErr = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
})
if err != nil {
return err
}
return opErr
},
}
}
type ServeIfConn interface {
ReadFrom(b []byte) (n int, addr net.Addr, err error)
WriteTo(b []byte, addr net.Addr) (n int, err error)
Close() error
}
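// Design note: the SO_REUSEADDR socket option set by CreateListenConfig lets the
// DHCP server bind :67 without failing when the address is lingering from a
// previous listener. Illustrative usage sketch (hypothetical interface name):
//
//	conn, err := NewUDP4FilterListener("eth0", ":67")
//	if err == nil {
//		defer conn.Close()
//		// hand conn to dhcp.Serve(...)
//	}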
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package serverv6
import (
"fmt"
"net"
"time"
"github.com/insomniacslk/dhcp/dhcpv6"
"github.com/insomniacslk/dhcp/dhcpv6/server6"
"golang.org/x/net/ipv6"
)
const errFmt = "%s: %v"
type FilteredConn struct {
ifIndex int
packetConn *ipv6.PacketConn
cm *ipv6.ControlMessage
}
func (fc *FilteredConn) ReadFrom(b []byte) (n int, addr net.Addr, err error) {
for { // Filter all other interfaces
n, fc.cm, addr, err = fc.packetConn.ReadFrom(b)
if err != nil || fc.cm == nil || fc.cm.IfIndex == fc.ifIndex {
break
}
}
return
}
func (fc *FilteredConn) WriteTo(b []byte, addr net.Addr) (n int, err error) {
fc.cm.Src = nil
return fc.packetConn.WriteTo(b, fc.cm, addr)
}
func (fc *FilteredConn) Close() error {
return fc.packetConn.Close()
}
func (fc *FilteredConn) LocalAddr() net.Addr {
return fc.packetConn.LocalAddr()
}
func (fc *FilteredConn) SetDeadline(t time.Time) error {
return fc.packetConn.SetDeadline(t)
}
func (fc *FilteredConn) SetReadDeadline(t time.Time) error {
return fc.packetConn.SetReadDeadline(t)
}
func (fc *FilteredConn) SetWriteDeadline(t time.Time) error {
return fc.packetConn.SetWriteDeadline(t)
}
func NewConnection(serverIface *net.Interface) (*FilteredConn, error) {
const errorString = "Failed creating connection for dhcpv6 server"
addr := &net.UDPAddr{
IP: net.IPv6unspecified,
Port: dhcpv6.DefaultServerPort,
}
udpConn, err := server6.NewIPv6UDPConn("", addr)
if err != nil {
return nil, fmt.Errorf(errFmt, errorString, err)
}
packetConn := ipv6.NewPacketConn(udpConn)
if err := packetConn.SetControlMessage(ipv6.FlagInterface, true); err != nil {
return nil, fmt.Errorf(errFmt, errorString, err)
}
group := net.UDPAddr{
IP: dhcpv6.AllDHCPRelayAgentsAndServers,
Port: dhcpv6.DefaultServerPort}
if err := packetConn.JoinGroup(serverIface, &group); err != nil {
return nil, fmt.Errorf(errFmt, errorString, err)
}
return &FilteredConn{packetConn: packetConn, ifIndex: serverIface.Index}, nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package serverv6
import (
"fmt"
"net"
"time"
"github.com/insomniacslk/dhcp/dhcpv6"
"github.com/insomniacslk/dhcp/dhcpv6/server6"
"github.com/insomniacslk/dhcp/iana"
"kubevirt.io/client-go/log"
)
const (
infiniteLease = 999 * 24 * time.Hour
)
type DHCPv6Handler struct {
clientIP net.IP
modifiers []dhcpv6.Modifier
}
func SingleClientDHCPv6Server(clientIP net.IP, serverIfaceName string, ipv6Nameservers [][]byte) error {
log.Log.Info("Starting SingleClientDHCPv6Server")
iface, err := net.InterfaceByName(serverIfaceName)
if err != nil {
return fmt.Errorf("couldn't create DHCPv6 server, couldn't get the dhcp6 server interface: %v", err)
}
modifiers := prepareDHCPv6Modifiers(clientIP, iface.HardwareAddr, ipv6Nameservers)
handler := &DHCPv6Handler{
clientIP: clientIP,
modifiers: modifiers,
}
conn, err := NewConnection(iface)
if err != nil {
return fmt.Errorf("couldn't create DHCPv6 server: %v", err)
}
s, err := server6.NewServer("", nil, handler.ServeDHCPv6, server6.WithConn(conn))
if err != nil {
return fmt.Errorf("couldn't create DHCPv6 server: %v", err)
}
err = s.Serve()
if err != nil {
return fmt.Errorf("failed to run DHCPv6 server: %v", err)
}
return nil
}
func (h *DHCPv6Handler) ServeDHCPv6(conn net.PacketConn, peer net.Addr, m dhcpv6.DHCPv6) {
log.Log.V(4).Info("DHCPv6 serving a new request")
// TODO if we extend the server to support bridge binding, we need to filter out non-vm requests
response, err := h.buildResponse(m)
if err != nil {
log.Log.Reason(err).Error("DHCPv6 failed building a response to the client")
return
}
if _, err := conn.WriteTo(response.ToBytes(), peer); err != nil {
log.Log.Reason(err).Error("DHCPv6 failed sending a response to the client")
}
}
func (h *DHCPv6Handler) buildResponse(msg dhcpv6.DHCPv6) (*dhcpv6.Message, error) {
var response *dhcpv6.Message
var err error
dhcpv6Msg := msg.(*dhcpv6.Message)
switch dhcpv6Msg.Type() {
case dhcpv6.MessageTypeSolicit:
log.Log.V(4).Info("DHCPv6 - the request has message type Solicit")
if dhcpv6Msg.GetOneOption(dhcpv6.OptionRapidCommit) == nil {
response, err = dhcpv6.NewAdvertiseFromSolicit(dhcpv6Msg, h.modifiers...)
} else {
log.Log.V(4).Info("DHCPv6 - replying with rapid commit")
response, err = dhcpv6.NewReplyFromMessage(dhcpv6Msg, h.modifiers...)
}
default:
log.Log.V(4).Info("DHCPv6 - non Solicit request received")
response, err = dhcpv6.NewReplyFromMessage(dhcpv6Msg, h.modifiers...)
}
if err != nil {
return nil, err
}
ianaRequest := dhcpv6Msg.Options.OneIANA()
if ianaRequest != nil {
ianaResponse := response.Options.OneIANA()
ianaResponse.IaId = ianaRequest.IaId
response.UpdateOption(ianaResponse)
}
return response, nil
}
func prepareDHCPv6Modifiers(clientIP net.IP, serverInterfaceMac net.HardwareAddr, ipv6Nameservers [][]byte) []dhcpv6.Modifier {
optIAAddress := dhcpv6.OptIAAddress{IPv6Addr: clientIP, PreferredLifetime: infiniteLease, ValidLifetime: infiniteLease}
duid := &dhcpv6.DUIDLL{HWType: iana.HWTypeEthernet, LinkLayerAddr: serverInterfaceMac}
modifiers := []dhcpv6.Modifier{dhcpv6.WithIANA(optIAAddress), dhcpv6.WithServerID(duid)}
if len(ipv6Nameservers) > 0 {
var dnsServers []net.IP
for _, nameserver := range ipv6Nameservers {
dnsServers = append(dnsServers, net.IP(nameserver))
}
modifiers = append(modifiers, dhcpv6.WithDNS(dnsServers...))
}
return modifiers
}
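// Illustrative sketch (hypothetical addresses): the modifiers pin a single client
// address with infinite lifetimes and advertise a server DUID derived from the
// serving interface's MAC; a DNS modifier is only added when nameservers exist.
//
//	mods := prepareDHCPv6Modifiers(
//		net.ParseIP("fd10:0:2::2"),
//		net.HardwareAddr{0x02, 0x00, 0x00, 0x00, 0x00, 0x01},
//		[][]byte{net.ParseIP("fd10:0:2::1").To16()},
//	) // -> IANA + ServerID + DNS modifiers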
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*/
package dns
import (
"bufio"
"net"
"os"
"strings"
"kubevirt.io/client-go/log"
)
const (
domainSearchPrefix = "search"
nameserverPrefix = "nameserver"
defaultDNS = "8.8.8.8"
defaultSearchDomain = "cluster.local"
)
type Nameservers struct {
IPv4 [][]byte
IPv6 [][]byte
}
func ParseNameservers(content string) (*Nameservers, error) {
var ipv4Nameservers [][]byte
var ipv6Nameservers [][]byte
scanner := bufio.NewScanner(strings.NewReader(content))
for scanner.Scan() {
line := scanner.Text()
if strings.HasPrefix(line, nameserverPrefix) {
fields := strings.Fields(line)
if len(fields) != 2 {
log.Log.Warningf("Invalid resolv.conf format: nameserver line should have only one value per line '%s'", line)
continue
}
nameserver := fields[1]
parsedIP := net.ParseIP(nameserver)
if parsedIP == nil {
continue
}
if ipv4 := parsedIP.To4(); ipv4 != nil {
ipv4Nameservers = append(ipv4Nameservers, ipv4)
} else {
ipv6Nameservers = append(ipv6Nameservers, parsedIP.To16())
}
}
}
if err := scanner.Err(); err != nil {
return nil, err
}
// apply a default DNS if none found from pod
if len(ipv4Nameservers) == 0 && len(ipv6Nameservers) == 0 {
ipv4Nameservers = append(ipv4Nameservers, net.ParseIP(defaultDNS).To4())
}
return &Nameservers{
IPv4: ipv4Nameservers,
IPv6: ipv6Nameservers,
}, nil
}
func ParseSearchDomains(content string) ([]string, error) {
var searchDomains []string
scanner := bufio.NewScanner(strings.NewReader(content))
for scanner.Scan() {
line := scanner.Text()
if strings.HasPrefix(line, domainSearchPrefix) {
doms := strings.Fields(strings.TrimPrefix(line, domainSearchPrefix))
for _, dom := range doms {
// domain names are case insensitive but kubernetes allows only lower-case
searchDomains = append(searchDomains, strings.ToLower(dom))
}
}
}
if err := scanner.Err(); err != nil {
return nil, err
}
if len(searchDomains) == 0 {
searchDomains = append(searchDomains, defaultSearchDomain)
}
return searchDomains, nil
}
// GetLongestServiceDomainName returns the longest service search domain entry
func GetLongestServiceDomainName(searchDomains []string) string {
serviceDomains := GetServiceDomainList(searchDomains)
return GetDomainName(serviceDomains)
}
// GetDomainName returns the longest search domain entry, which is the most exact equivalent to a domain
func GetDomainName(searchDomains []string) string {
selected := ""
for _, d := range searchDomains {
if len(d) > len(selected) {
selected = d
}
}
return selected
}
// GetServiceDomainList returns a list of search domains which are a service entry
func GetServiceDomainList(searchDomains []string) []string {
const k8sServiceInfix = ".svc."
serviceDomains := []string{}
for _, d := range searchDomains {
if strings.Contains(d, k8sServiceInfix) {
serviceDomains = append(serviceDomains, d)
}
}
return serviceDomains
}
// DomainNameWithSubdomain returns the DNS domain according to the subdomain.
// If the subdomain already exists in the domain, it returns an empty string, as nothing should be added.
// If the subdomain is empty, it returns an empty string, as nothing should be added.
// The motivation is that glibc prior to 2.26 had a 6-domain / 256-byte limit;
// due to this limitation, the subdomain.namespace.svc.cluster.local DNS entry was not added by k8s to the pod's /etc/resolv.conf.
// This function calculates the missing domain, which will be added by kubevirt.
// See https://github.com/kubernetes/kubernetes/issues/48019 for more details.
func DomainNameWithSubdomain(searchDomains []string, subdomain string) string {
if subdomain == "" {
return ""
}
domainName := GetLongestServiceDomainName(searchDomains)
if domainName != "" && !strings.HasPrefix(domainName, subdomain+".") {
return subdomain + "." + domainName
}
return ""
}
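// Worked example (hypothetical search list): only domains containing ".svc."
// count as service domains, so with a search list of
// ["ns1.svc.cluster.local", "svc.cluster.local", "cluster.local"] the longest
// service domain is "ns1.svc.cluster.local":
//
//	DomainNameWithSubdomain([]string{"ns1.svc.cluster.local", "cluster.local"}, "web") // "web.ns1.svc.cluster.local"
//	DomainNameWithSubdomain([]string{"ns1.svc.cluster.local", "cluster.local"}, "ns1") // "": domain already starts with "ns1."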
// GetResolvConfDetailsFromPod reads and parses the DNS resolver's configuration file.
func GetResolvConfDetailsFromPod() (*Nameservers, []string, error) {
// #nosec No risk for path injection. resolvConf is static "/etc/resolv.conf"
const resolvConf = "/etc/resolv.conf"
b, err := os.ReadFile(resolvConf)
if err != nil {
return nil, nil, err
}
nameservers, err := ParseNameservers(string(b))
if err != nil {
return nil, nil, err
}
searchDomains, err := ParseSearchDomains(string(b))
if err != nil {
return nil, nil, err
}
log.Log.Infof("Found IPv4 nameservers in %s: %s", resolvConf, strings.Join(toIPStrings(nameservers.IPv4), " "))
log.Log.Infof("Found IPv6 nameservers in %s: %s", resolvConf, strings.Join(toIPStrings(nameservers.IPv6), " "))
log.Log.Infof("Found search domains in %s: %s", resolvConf, strings.Join(searchDomains, " "))
return nameservers, searchDomains, nil
}
func toIPStrings(ips [][]byte) []string {
var result []string
for _, ip := range ips {
if parsed := net.IP(ip); parsed != nil {
result = append(result, parsed.String())
}
}
return result
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package downwardapi
import (
"encoding/json"
"maps"
"slices"
networkv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
"kubevirt.io/client-go/log"
)
const (
NetworkInfoAnnot = "kubevirt.io/network-info"
MountPath = "/etc/podinfo"
NetworkInfoVolumeName = "network-info-annotation"
NetworkInfoVolumePath = "network-info"
)
func CreateNetworkInfoAnnotationValue(networkDeviceInfoMap map[string]*networkv1.DeviceInfo) string {
networkInfo := generateNetworkInfo(networkDeviceInfoMap)
networkInfoBytes, err := json.Marshal(networkInfo)
if err != nil {
log.Log.Warningf("failed to marshal network-info: %v", err)
return ""
}
return string(networkInfoBytes)
}
func generateNetworkInfo(networkDeviceInfoMap map[string]*networkv1.DeviceInfo) NetworkInfo {
var downwardAPIInterfaces []Interface
// Sort the map keys to get a deterministic order
sortedNetNames := slices.Sorted(maps.Keys(networkDeviceInfoMap))
for _, networkName := range sortedNetNames {
deviceInfo := networkDeviceInfoMap[networkName]
downwardAPIInterfaces = append(downwardAPIInterfaces, Interface{Network: networkName, DeviceInfo: deviceInfo})
}
networkInfo := NetworkInfo{Interfaces: downwardAPIInterfaces}
return networkInfo
}
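// Illustrative sketch (assuming NetworkInfo/Interface carry the usual
// "interfaces"/"network"/"deviceInfo" JSON tags defined alongside this package):
//
//	value := CreateNetworkInfoAnnotationValue(map[string]*networkv1.DeviceInfo{
//		"sriov-net": {Type: "pci", Version: "1.1.0"},
//	})
//	// value is deterministic JSON, e.g.:
//	// {"interfaces":[{"network":"sriov-net","deviceInfo":{"type":"pci","version":"1.1.0"}}]}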
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
//go:generate mockgen -source $GOFILE -package=$GOPACKAGE -destination=generated_mock_$GOFILE
package driver
import (
"fmt"
"os"
"github.com/vishvananda/netlink"
netutils "k8s.io/utils/net"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/network/cache"
dhcpserver "kubevirt.io/kubevirt/pkg/network/dhcp/server"
dhcpserverv6 "kubevirt.io/kubevirt/pkg/network/dhcp/serverv6"
"kubevirt.io/kubevirt/pkg/network/dns"
)
const (
LibvirtUserAndGroupId = "0"
)
type IPVersion int
const (
IPv4 IPVersion = 4
IPv6 IPVersion = 6
)
type NetworkHandler interface {
LinkByName(name string) (netlink.Link, error)
AddrList(link netlink.Link, family int) ([]netlink.Addr, error)
ReadIPAddressesFromLink(interfaceName string) (string, string, error)
RouteList(link netlink.Link, family int) ([]netlink.Route, error)
LinkDel(link netlink.Link) error
ParseAddr(s string) (*netlink.Addr, error)
StartDHCP(nic *cache.DHCPConfig, bridgeInterfaceName string, dhcpOptions *v1.DHCPOptions) error
HasIPv4GlobalUnicastAddress(interfaceName string) (bool, error)
HasIPv6GlobalUnicastAddress(interfaceName string) (bool, error)
IsIpv4Primary() (bool, error)
}
type NetworkUtilsHandler struct{}
func (h *NetworkUtilsHandler) LinkByName(name string) (netlink.Link, error) {
return netlink.LinkByName(name)
}
func (h *NetworkUtilsHandler) AddrList(link netlink.Link, family int) ([]netlink.Addr, error) {
return netlink.AddrList(link, family)
}
func (h *NetworkUtilsHandler) RouteList(link netlink.Link, family int) ([]netlink.Route, error) {
return netlink.RouteList(link, family)
}
func (h *NetworkUtilsHandler) LinkDel(link netlink.Link) error {
return netlink.LinkDel(link)
}
func (h *NetworkUtilsHandler) ParseAddr(s string) (*netlink.Addr, error) {
return netlink.ParseAddr(s)
}
func (h *NetworkUtilsHandler) HasIPv4GlobalUnicastAddress(interfaceName string) (bool, error) {
link, err := h.LinkByName(interfaceName)
if err != nil {
return false, err
}
addrList, err := h.AddrList(link, netlink.FAMILY_V4)
if err != nil {
return false, err
}
for _, addr := range addrList {
if addr.IP.IsGlobalUnicast() {
return true, nil
}
}
return false, nil
}
func (h *NetworkUtilsHandler) HasIPv6GlobalUnicastAddress(interfaceName string) (bool, error) {
link, err := h.LinkByName(interfaceName)
if err != nil {
return false, err
}
addrList, err := h.AddrList(link, netlink.FAMILY_V6)
if err != nil {
return false, err
}
for _, addr := range addrList {
if addr.IP.IsGlobalUnicast() {
return true, nil
}
}
return false, nil
}
func (h *NetworkUtilsHandler) IsIpv4Primary() (bool, error) {
podIP, exist := os.LookupEnv("MY_POD_IP")
if !exist {
return false, fmt.Errorf("MY_POD_IP doesn't exist")
}
return !netutils.IsIPv6String(podIP), nil
}
func (h *NetworkUtilsHandler) ReadIPAddressesFromLink(interfaceName string) (string, string, error) {
link, err := h.LinkByName(interfaceName)
if err != nil {
log.Log.Reason(err).Errorf("failed to get a link for interface: %s", interfaceName)
return "", "", err
}
// get IP address
addrList, err := h.AddrList(link, netlink.FAMILY_ALL)
if err != nil {
log.Log.Reason(err).Errorf("failed to get an address for interface: %s", interfaceName)
return "", "", err
}
// No IP assigned; IPAM is disabled.
if len(addrList) == 0 {
return "", "", nil
}
var ipv4, ipv6 string
for _, addr := range addrList {
if addr.IP.IsGlobalUnicast() {
if netutils.IsIPv6(addr.IP) && ipv6 == "" {
ipv6 = addr.IP.String()
} else if !netutils.IsIPv6(addr.IP) && ipv4 == "" {
ipv4 = addr.IP.String()
}
}
}
return ipv4, ipv6, nil
}
func (h *NetworkUtilsHandler) StartDHCP(nic *cache.DHCPConfig, bridgeInterfaceName string, dhcpOptions *v1.DHCPOptions) error {
log.Log.V(4).Infof("StartDHCP network Nic: %+v", nic)
nameservers, searchDomains, err := dns.GetResolvConfDetailsFromPod()
if err != nil {
return fmt.Errorf("Failed to get DNS servers from resolv.conf: %v", err)
}
domain := dns.DomainNameWithSubdomain(searchDomains, nic.Subdomain)
if domain != "" {
searchDomains = append([]string{domain}, searchDomains...)
}
if nic.IP.IPNet != nil {
// Panic if the DHCP server fails during VM creation,
// but ignore DHCP errors when the VM is destroyed or shutting down.
go func() {
if err = DHCPServer(
nic.MAC,
nic.IP.IP,
nic.IP.Mask,
bridgeInterfaceName,
nic.AdvertisingIPAddr,
nic.Gateway,
nameservers.IPv4,
nic.Routes,
searchDomains,
nic.Mtu,
dhcpOptions,
); err != nil {
log.Log.Errorf("failed to run DHCP Server: %v", err)
panic(err)
}
}()
}
if nic.IPv6.IPNet != nil {
go func() {
if err = DHCPv6Server(
nic.IPv6.IP,
bridgeInterfaceName,
nameservers.IPv6,
); err != nil {
log.Log.Reason(err).Error("failed to run DHCPv6 Server")
panic(err)
}
}()
}
return nil
}
// Allow mocking for tests
var DHCPServer = dhcpserver.SingleClientDHCPServer
var DHCPv6Server = dhcpserverv6.SingleClientDHCPv6Server
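// Illustrative test sketch (hypothetical): because DHCPServer and DHCPv6Server
// are package-level variables, tests can swap them out before calling StartDHCP.
//
//	origDHCPServer := DHCPServer
//	defer func() { DHCPServer = origDHCPServer }()
//	DHCPServer = func(net.HardwareAddr, net.IP, net.IPMask, string, net.IP, net.IP,
//		[][]byte, *[]netlink.Route, []string, uint16, *v1.DHCPOptions) error {
//		return nil // pretend the server started successfully
//	}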
// Code generated by MockGen. DO NOT EDIT.
// Source: common.go
//
// Generated by this command:
//
// mockgen -source common.go -package=driver -destination=generated_mock_common.go
//
// Package driver is a generated GoMock package.
package driver
import (
reflect "reflect"
netlink "github.com/vishvananda/netlink"
gomock "go.uber.org/mock/gomock"
v1 "kubevirt.io/api/core/v1"
cache "kubevirt.io/kubevirt/pkg/network/cache"
)
// MockNetworkHandler is a mock of NetworkHandler interface.
type MockNetworkHandler struct {
ctrl *gomock.Controller
recorder *MockNetworkHandlerMockRecorder
isgomock struct{}
}
// MockNetworkHandlerMockRecorder is the mock recorder for MockNetworkHandler.
type MockNetworkHandlerMockRecorder struct {
mock *MockNetworkHandler
}
// NewMockNetworkHandler creates a new mock instance.
func NewMockNetworkHandler(ctrl *gomock.Controller) *MockNetworkHandler {
mock := &MockNetworkHandler{ctrl: ctrl}
mock.recorder = &MockNetworkHandlerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockNetworkHandler) EXPECT() *MockNetworkHandlerMockRecorder {
return m.recorder
}
// AddrList mocks base method.
func (m *MockNetworkHandler) AddrList(link netlink.Link, family int) ([]netlink.Addr, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddrList", link, family)
ret0, _ := ret[0].([]netlink.Addr)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// AddrList indicates an expected call of AddrList.
func (mr *MockNetworkHandlerMockRecorder) AddrList(link, family any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddrList", reflect.TypeOf((*MockNetworkHandler)(nil).AddrList), link, family)
}
// HasIPv4GlobalUnicastAddress mocks base method.
func (m *MockNetworkHandler) HasIPv4GlobalUnicastAddress(interfaceName string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "HasIPv4GlobalUnicastAddress", interfaceName)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// HasIPv4GlobalUnicastAddress indicates an expected call of HasIPv4GlobalUnicastAddress.
func (mr *MockNetworkHandlerMockRecorder) HasIPv4GlobalUnicastAddress(interfaceName any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasIPv4GlobalUnicastAddress", reflect.TypeOf((*MockNetworkHandler)(nil).HasIPv4GlobalUnicastAddress), interfaceName)
}
// HasIPv6GlobalUnicastAddress mocks base method.
func (m *MockNetworkHandler) HasIPv6GlobalUnicastAddress(interfaceName string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "HasIPv6GlobalUnicastAddress", interfaceName)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// HasIPv6GlobalUnicastAddress indicates an expected call of HasIPv6GlobalUnicastAddress.
func (mr *MockNetworkHandlerMockRecorder) HasIPv6GlobalUnicastAddress(interfaceName any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasIPv6GlobalUnicastAddress", reflect.TypeOf((*MockNetworkHandler)(nil).HasIPv6GlobalUnicastAddress), interfaceName)
}
// IsIpv4Primary mocks base method.
func (m *MockNetworkHandler) IsIpv4Primary() (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IsIpv4Primary")
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// IsIpv4Primary indicates an expected call of IsIpv4Primary.
func (mr *MockNetworkHandlerMockRecorder) IsIpv4Primary() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsIpv4Primary", reflect.TypeOf((*MockNetworkHandler)(nil).IsIpv4Primary))
}
// LinkByName mocks base method.
func (m *MockNetworkHandler) LinkByName(name string) (netlink.Link, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LinkByName", name)
ret0, _ := ret[0].(netlink.Link)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// LinkByName indicates an expected call of LinkByName.
func (mr *MockNetworkHandlerMockRecorder) LinkByName(name any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LinkByName", reflect.TypeOf((*MockNetworkHandler)(nil).LinkByName), name)
}
// LinkDel mocks base method.
func (m *MockNetworkHandler) LinkDel(link netlink.Link) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LinkDel", link)
ret0, _ := ret[0].(error)
return ret0
}
// LinkDel indicates an expected call of LinkDel.
func (mr *MockNetworkHandlerMockRecorder) LinkDel(link any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LinkDel", reflect.TypeOf((*MockNetworkHandler)(nil).LinkDel), link)
}
// ParseAddr mocks base method.
func (m *MockNetworkHandler) ParseAddr(s string) (*netlink.Addr, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ParseAddr", s)
ret0, _ := ret[0].(*netlink.Addr)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ParseAddr indicates an expected call of ParseAddr.
func (mr *MockNetworkHandlerMockRecorder) ParseAddr(s any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParseAddr", reflect.TypeOf((*MockNetworkHandler)(nil).ParseAddr), s)
}
// ReadIPAddressesFromLink mocks base method.
func (m *MockNetworkHandler) ReadIPAddressesFromLink(interfaceName string) (string, string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReadIPAddressesFromLink", interfaceName)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(string)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// ReadIPAddressesFromLink indicates an expected call of ReadIPAddressesFromLink.
func (mr *MockNetworkHandlerMockRecorder) ReadIPAddressesFromLink(interfaceName any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadIPAddressesFromLink", reflect.TypeOf((*MockNetworkHandler)(nil).ReadIPAddressesFromLink), interfaceName)
}
// RouteList mocks base method.
func (m *MockNetworkHandler) RouteList(link netlink.Link, family int) ([]netlink.Route, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RouteList", link, family)
ret0, _ := ret[0].([]netlink.Route)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// RouteList indicates an expected call of RouteList.
func (mr *MockNetworkHandlerMockRecorder) RouteList(link, family any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RouteList", reflect.TypeOf((*MockNetworkHandler)(nil).RouteList), link, family)
}
// StartDHCP mocks base method.
func (m *MockNetworkHandler) StartDHCP(nic *cache.DHCPConfig, bridgeInterfaceName string, dhcpOptions *v1.DHCPOptions) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StartDHCP", nic, bridgeInterfaceName, dhcpOptions)
ret0, _ := ret[0].(error)
return ret0
}
// StartDHCP indicates an expected call of StartDHCP.
func (mr *MockNetworkHandlerMockRecorder) StartDHCP(nic, bridgeInterfaceName, dhcpOptions any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartDHCP", reflect.TypeOf((*MockNetworkHandler)(nil).StartDHCP), nic, bridgeInterfaceName, dhcpOptions)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package istio
const (
EnvoyAdminPort = 15000
EnvoyOutboundPort = 15001
EnvoyDebugPort = 15004
EnvoyInboundPort = 15006
EnvoyTunnelPort = 15008
EnvoySecureNetworkPort = 15009
EnvoyMergedPrometheusTelemetryPort = 15020
EnvoyHealthCheckPort = 15021
EnvoyDNSPort = 15053
EnvoyPrometheusTelemetryPort = 15090
SSHPort = 22
)
func ReservedPorts() []uint {
return []uint{
EnvoyAdminPort,
EnvoyOutboundPort,
EnvoyDebugPort,
EnvoyInboundPort,
EnvoyTunnelPort,
EnvoySecureNetworkPort,
EnvoyMergedPrometheusTelemetryPort,
EnvoyHealthCheckPort,
EnvoyDNSPort,
EnvoyPrometheusTelemetryPort,
}
}
func NonProxiedPorts() []int {
return []int{
SSHPort,
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package istio
import (
"strings"
v1 "kubevirt.io/api/core/v1"
)
func ProxyInjectionEnabled(vmi *v1.VirtualMachineInstance) bool {
if val, ok := vmi.GetAnnotations()[InjectSidecarAnnotation]; ok {
return strings.EqualFold(val, "true")
}
return false
}
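// GetLoopbackAddress returns the source address used for inbound passthrough
// traffic by Istio's Envoy sidecar (127.0.0.6 by Istio convention).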
func GetLoopbackAddress() string {
return "127.0.0.6"
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package link
import (
"fmt"
"net"
"github.com/vishvananda/netlink"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
netdriver "kubevirt.io/kubevirt/pkg/network/driver"
"kubevirt.io/kubevirt/pkg/network/netmachinery"
"kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
)
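// bridgeFakeIP is a link-local address template; the interface index is appended
// to the last octet prefix, e.g. index 1 yields "169.254.75.11/32".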
const bridgeFakeIP = "169.254.75.1%d/32"
func getMasqueradeGwAndHostAddressesFromCIDR(s string) (string, string, error) {
ip, ipnet, err := net.ParseCIDR(s)
if err != nil {
return "", "", err
}
subnet, _ := ipnet.Mask.Size()
var ips []string
for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); netmachinery.NextIP(ip) {
ips = append(ips, fmt.Sprintf("%s/%d", ip.String(), subnet))
if len(ips) == 4 {
// skip the network address (ips[0]) and use the next two addresses
// as the gateway and VM addresses
return ips[1], ips[2], nil
}
}
return "", "", fmt.Errorf("less than 4 addresses on network")
}
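// Illustrative sketch (not in the original source), assuming the default VM CIDR
// "10.0.2.0/24": the first four addresses are 10.0.2.0 through 10.0.2.3, so:
//
//	gw, vm, _ := getMasqueradeGwAndHostAddressesFromCIDR("10.0.2.0/24")
//	// gw == "10.0.2.1/24" (gateway), vm == "10.0.2.2/24" (VM address)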
func GenerateMasqueradeGatewayAndVmIPAddrs(vmiSpecNetwork *v1.Network, ipVersion netdriver.IPVersion) (*netlink.Addr, *netlink.Addr, error) {
var cidrToConfigure string
if ipVersion == netdriver.IPv4 {
if vmiSpecNetwork.Pod.VMNetworkCIDR == "" {
cidrToConfigure = api.DefaultVMCIDR
} else {
cidrToConfigure = vmiSpecNetwork.Pod.VMNetworkCIDR
}
}
if ipVersion == netdriver.IPv6 {
if vmiSpecNetwork.Pod.VMIPv6NetworkCIDR == "" {
cidrToConfigure = api.DefaultVMIpv6CIDR
} else {
cidrToConfigure = vmiSpecNetwork.Pod.VMIPv6NetworkCIDR
}
}
gatewayIP, vmIP, err := getMasqueradeGwAndHostAddressesFromCIDR(cidrToConfigure)
if err != nil {
log.Log.Reason(err).Errorf("failed to get gw and vm available addresses from CIDR %s", cidrToConfigure)
return nil, nil, err
}
gatewayAddr, err := netlink.ParseAddr(gatewayIP)
if err != nil {
return nil, nil, fmt.Errorf("failed to parse gateway address %s: %v", gatewayIP, err)
}
vmAddr, err := netlink.ParseAddr(vmIP)
if err != nil {
return nil, nil, fmt.Errorf("failed to parse vm address %s: %v", vmIP, err)
}
return gatewayAddr, vmAddr, nil
}
func RetrieveMacAddressFromVMISpecIface(vmiSpecIface *v1.Interface) (*net.HardwareAddr, error) {
if vmiSpecIface.MacAddress != "" {
macAddress, err := net.ParseMAC(vmiSpecIface.MacAddress)
if err != nil {
return nil, err
}
return &macAddress, nil
}
return nil, nil
}
func GetFakeBridgeIP(vmiSpecIfaces []v1.Interface, vmiSpecIface *v1.Interface) string {
for i, iface := range vmiSpecIfaces {
if iface.Name == vmiSpecIface.Name {
return fmt.Sprintf(bridgeFakeIP, i)
}
}
return ""
}
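// Illustrative sketch: given vmiSpecIfaces [ifaceA, ifaceB] (hypothetical) and
// vmiSpecIface == ifaceB, GetFakeBridgeIP returns
// fmt.Sprintf(bridgeFakeIP, 1) == "169.254.75.11/32".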
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package link
import (
"errors"
"fmt"
"strings"
"github.com/vishvananda/netlink"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/network/driver"
"kubevirt.io/kubevirt/pkg/network/namescheme"
)
// DiscoverByNetwork returns the pod interface link for the given network name.
// If the link is not found, it retries using the pod interface's ordinal name (net1, net2, ...)
// derived from the subject network's position in the given networks slice.
// If no link is found, a nil link is returned.
func DiscoverByNetwork(handler driver.NetworkHandler, networks []v1.Network, subjectNetwork v1.Network, ifaceStatuses []v1.VirtualMachineInstanceNetworkInterface) (netlink.Link, error) {
ifaceNames, err := networkInterfaceNames(networks, subjectNetwork, ifaceStatuses)
if err != nil {
return nil, err
}
return linkByNames(handler, ifaceNames)
}
func networkInterfaceNames(networks []v1.Network, subjectNetwork v1.Network, ifaceStatuses []v1.VirtualMachineInstanceNetworkInterface) ([]string, error) {
ifaceName := namescheme.HashedPodInterfaceName(subjectNetwork, ifaceStatuses)
ordinalIfaceName := namescheme.OrdinalPodInterfaceName(subjectNetwork.Name, networks)
if ordinalIfaceName == "" {
return nil, fmt.Errorf("could not find the pod interface ordinal name for network [%s]", subjectNetwork.Name)
}
return []string{ifaceName, ordinalIfaceName}, nil
}
func linkByNames(handler driver.NetworkHandler, names []string) (netlink.Link, error) {
var errs []string
for _, name := range names {
link, err := handler.LinkByName(name)
if err == nil {
return link, nil
}
var linkNotFoundErr netlink.LinkNotFoundError
if !errors.As(err, &linkNotFoundErr) {
errs = append(errs, fmt.Sprintf("could not get link with name %q: %v", name, err))
}
}
if len(errs) == 0 {
return nil, nil
}
return nil, fmt.Errorf("%s", strings.Join(errs, ", "))
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package link
import (
"fmt"
"net"
)
const StaticMasqueradeBridgeMAC = "02:00:00:00:00:00"
func IsReserved(mac string) bool {
return mac == StaticMasqueradeBridgeMAC
}
// ValidateMacAddress validates the format and length of the given MAC address.
// An empty MAC address is ignored (i.e. considered valid).
func ValidateMacAddress(macAddress string) error {
if macAddress == "" {
return nil
}
mac, err := net.ParseMAC(macAddress)
if err != nil {
return fmt.Errorf("malformed MAC address (%s)", macAddress)
}
const macLen = 6
if len(mac) > macLen {
return fmt.Errorf("too long MAC address (%s)", macAddress)
}
return nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package link
import (
"fmt"
"strings"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/network/namescheme"
"kubevirt.io/kubevirt/pkg/network/vmispec"
)
const tapNameForPrimaryIface = "tap0"
func GenerateTapDeviceName(podInterfaceName string, network v1.Network) string {
if vmispec.IsSecondaryMultusNetwork(network) {
return "tap" + podInterfaceName[3:]
}
return tapNameForPrimaryIface
}
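// Illustrative sketch: a hashed secondary pod interface name such as
// "pod16477688c0e" (hypothetical) maps to "tap16477688c0e" (the "pod" prefix is
// replaced by "tap"), while the primary interface always maps to "tap0".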
func GenerateBridgeName(podInterfaceName string) string {
trimmedName := strings.TrimPrefix(podInterfaceName, namescheme.HashedIfacePrefix)
return "k6t-" + trimmedName
}
func GenerateNewBridgedVmiInterfaceName(originalPodInterfaceName string) string {
trimmedName := strings.TrimPrefix(originalPodInterfaceName, namescheme.HashedIfacePrefix)
return fmt.Sprintf("%s-nic", trimmedName)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package multus
import (
"encoding/json"
"fmt"
networkv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/network/namescheme"
"kubevirt.io/kubevirt/pkg/network/vmispec"
)
const (
// DefaultNetworkCNIAnnotation instructs Multus to connect the pod's primary interface
// to a network other than the cluster-wide default defined by Multus's `clusterNetwork` field under /etc/cni/net.d.
// The value of this annotation should be a NetworkAttachmentDefinition's name.
DefaultNetworkCNIAnnotation = "v1.multus-cni.io/default-network"
// ResourceNameAnnotation represents a resource name that is associated with the network.
// It could be found on NetworkAttachmentDefinition objects.
ResourceNameAnnotation = "k8s.v1.cni.cncf.io/resourceName"
)
func GenerateCNIAnnotation(
namespace string,
interfaces []v1.Interface,
networks []v1.Network,
registeredBindingPlugins map[string]v1.InterfaceBindingPlugin,
) (string, error) {
return GenerateCNIAnnotationFromNameScheme(
namespace,
interfaces,
networks,
namescheme.CreateHashedNetworkNameScheme(networks),
registeredBindingPlugins,
)
}
func GenerateCNIAnnotationFromNameScheme(
namespace string,
interfaces []v1.Interface,
networks []v1.Network,
networkNameScheme map[string]string,
registeredBindingPlugins map[string]v1.InterfaceBindingPlugin,
) (string, error) {
var networkSelectionElements []networkv1.NetworkSelectionElement
for _, network := range networks {
if vmispec.IsSecondaryMultusNetwork(network) {
podInterfaceName := networkNameScheme[network.Name]
networkSelectionElements = append(networkSelectionElements, newAnnotationData(namespace, interfaces, network, podInterfaceName))
}
if iface := vmispec.LookupInterfaceByName(interfaces, network.Name); iface != nil && iface.Binding != nil {
bindingPluginAnnotationData, err := newBindingPluginAnnotationData(
registeredBindingPlugins,
iface.Binding.Name,
namespace,
network.Name,
)
if err != nil {
return "", err
}
if bindingPluginAnnotationData != nil {
networkSelectionElements = append(networkSelectionElements, *bindingPluginAnnotationData)
}
}
}
if len(networkSelectionElements) == 0 {
return "", nil
}
multusNetworksAnnotation, err := json.Marshal(networkSelectionElements)
if err != nil {
return "", fmt.Errorf("failed to create JSON list from networkSelectionElements: %v", networkSelectionElements)
}
return string(multusNetworksAnnotation), nil
}
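// Illustrative sketch (hypothetical names): a VMI with one secondary Multus
// network "blue" backed by the NetworkAttachmentDefinition "ns1/net-blue" would
// produce an annotation along the lines of:
//
//	[{"name":"net-blue","namespace":"ns1","interface":"pod<hash>"}]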
func newAnnotationData(
namespace string,
interfaces []v1.Interface,
network v1.Network,
podInterfaceName string,
) networkv1.NetworkSelectionElement {
multusIface := vmispec.LookupInterfaceByName(interfaces, network.Name)
nadNamespacedName := NetAttachDefNamespacedName(namespace, network.Multus.NetworkName)
var multusIfaceMac string
if multusIface != nil {
multusIfaceMac = multusIface.MacAddress
}
return networkv1.NetworkSelectionElement{
InterfaceRequest: podInterfaceName,
MacRequest: multusIfaceMac,
Namespace: nadNamespacedName.Namespace,
Name: nadNamespacedName.Name,
}
}
func newBindingPluginAnnotationData(
registeredBindingPlugins map[string]v1.InterfaceBindingPlugin,
pluginName,
namespace,
networkName string,
) (*networkv1.NetworkSelectionElement, error) {
plugin, exists := registeredBindingPlugins[pluginName]
if !exists {
return nil, fmt.Errorf("unable to find the network binding plugin '%s' in Kubevirt configuration", pluginName)
}
if plugin.NetworkAttachmentDefinition == "" {
return nil, nil
}
nadNamespacedName := NetAttachDefNamespacedName(namespace, plugin.NetworkAttachmentDefinition)
// cniArgNetworkName is the CNI arg name for the VM spec network logical name.
// The binding plugin CNI should read this arg and realize which logical network it should modify.
const cniArgNetworkName = "logicNetworkName"
return &networkv1.NetworkSelectionElement{
Namespace: nadNamespacedName.Namespace,
Name: nadNamespacedName.Name,
CNIArgs: &map[string]interface{}{
cniArgNetworkName: networkName,
},
}, nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package multus
import (
"context"
"fmt"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/precond"
)
func NetAttachDefNamespacedName(namespace, fullNetworkName string) types.NamespacedName {
if strings.Contains(fullNetworkName, "/") {
const twoParts = 2
res := strings.SplitN(fullNetworkName, "/", twoParts)
return types.NamespacedName{
Namespace: res[0],
Name: res[1],
}
}
return types.NamespacedName{
Namespace: precond.MustNotBeEmpty(namespace),
Name: fullNetworkName,
}
}
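// Illustrative sketch:
//
//	NetAttachDefNamespacedName("default", "ns1/net-a") // => {Namespace: "ns1", Name: "net-a"}
//	NetAttachDefNamespacedName("default", "net-a")     // => {Namespace: "default", Name: "net-a"}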
func NetworkToResource(virtClient kubecli.KubevirtClient, vmi *v1.VirtualMachineInstance) (map[string]string, error) {
networkToResourceMap := map[string]string{}
for _, network := range vmi.Spec.Networks {
if network.Multus == nil {
continue
}
nadNamespacedName := NetAttachDefNamespacedName(vmi.Namespace, network.Multus.NetworkName)
netAttachDef, err := virtClient.NetworkClient().
K8sCniCncfIoV1().
NetworkAttachmentDefinitions(nadNamespacedName.Namespace).
Get(context.Background(), nadNamespacedName.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to locate network attachment definition %s", nadNamespacedName.String())
}
networkToResourceMap[network.Name] = netAttachDef.Annotations[ResourceNameAnnotation]
}
return networkToResourceMap, nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package multus
import (
"encoding/json"
k8scorev1 "k8s.io/api/core/v1"
networkv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
"kubevirt.io/client-go/log"
)
// NetworkStatusesByPodIfaceName creates a map of NetworkStatus objects by pod interface name.
func NetworkStatusesByPodIfaceName(networkStatuses []networkv1.NetworkStatus) map[string]networkv1.NetworkStatus {
statusesByPodIfaceName := map[string]networkv1.NetworkStatus{}
for _, ns := range networkStatuses {
statusesByPodIfaceName[ns.Interface] = ns
}
return statusesByPodIfaceName
}
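// NetworkStatusesFromPod parses the Multus network-status annotation
// (networkv1.NetworkStatusAnnot) on the given pod; unmarshal failures are
// logged and result in an empty slice.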
func NetworkStatusesFromPod(pod *k8scorev1.Pod) []networkv1.NetworkStatus {
var networkStatuses []networkv1.NetworkStatus
if rawNetworkStatus := pod.Annotations[networkv1.NetworkStatusAnnot]; rawNetworkStatus != "" {
if err := json.Unmarshal([]byte(rawNetworkStatus), &networkStatuses); err != nil {
log.Log.Errorf("failed to unmarshall pod network status: %v", err)
}
}
return networkStatuses
}
func LookupPodPrimaryIfaceName(networkStatuses []networkv1.NetworkStatus) string {
for _, ns := range networkStatuses {
if ns.Default && ns.Interface != "" {
return ns.Interface
}
}
return ""
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package namescheme
import (
"crypto/sha256"
"fmt"
"io"
"maps"
"regexp"
networkv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/network/vmispec"
)
const (
// maxIfaceNameLen equals max kernel interface name len (15) - length("-nic")
// which is the suffix used for the bridge binding interface with IPAM.
// (the interface created to hold the pod's IP address - and thus appease CNI).
maxIfaceNameLen = 11
HashedIfacePrefix = "pod"
PrimaryPodInterfaceName = "eth0"
)
// CreateHashedNetworkNameScheme iterates over the VMI's networks and creates a pod interface name for each.
// The returned map associates each network name with the generated pod interface name.
// The primary network uses "eth0"; secondary networks are named with the hashed network name.
func CreateHashedNetworkNameScheme(vmiNetworks []v1.Network) map[string]string {
networkNameSchemeMap := mapMultusNonDefaultNetworksToPodInterfaceName(vmiNetworks)
if multusDefaultNetwork := vmispec.LookUpDefaultNetwork(vmiNetworks); multusDefaultNetwork != nil {
networkNameSchemeMap[multusDefaultNetwork.Name] = PrimaryPodInterfaceName
}
return networkNameSchemeMap
}
func HashedPodInterfaceName(network v1.Network, ifaceStatuses []v1.VirtualMachineInstanceNetworkInterface) string {
if vmispec.IsSecondaryMultusNetwork(network) {
return GenerateHashedInterfaceName(network.Name)
}
if primaryIfaceStatus := vmispec.LookupInterfaceStatusByName(ifaceStatuses, network.Name); primaryIfaceStatus != nil &&
primaryIfaceStatus.PodInterfaceName != "" {
return primaryIfaceStatus.PodInterfaceName
}
return PrimaryPodInterfaceName
}
func mapMultusNonDefaultNetworksToPodInterfaceName(networks []v1.Network) map[string]string {
networkNameSchemeMap := map[string]string{}
for _, network := range vmispec.FilterMultusNonDefaultNetworks(networks) {
networkNameSchemeMap[network.Name] = GenerateHashedInterfaceName(network.Name)
}
return networkNameSchemeMap
}
func GenerateHashedInterfaceName(networkName string) string {
hash := sha256.New()
_, _ = io.WriteString(hash, networkName)
hashedName := fmt.Sprintf("%x", hash.Sum(nil))[:maxIfaceNameLen]
return fmt.Sprintf("%s%s", HashedIfacePrefix, hashedName)
}
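// Illustrative sketch: GenerateHashedInterfaceName("blue") returns "pod" followed
// by the first 11 hex characters of sha256("blue"). The 11-character limit leaves
// room for the "-nic" suffix once the "pod" prefix is trimmed (see
// GenerateNewBridgedVmiInterfaceName), keeping names within the kernel's
// 15-character interface name limit.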
// CreateOrdinalNetworkNameScheme iterates over the VMI's networks and creates a pod interface name for each.
// The returned map associates each network name with the generated pod interface name.
// The primary network uses "eth0"; secondary networks use the "net<id>" format, where id is an enumeration
// from 1 to n.
func CreateOrdinalNetworkNameScheme(vmiNetworks []v1.Network) map[string]string {
networkNameSchemeMap := mapMultusNonDefaultNetworksToPodInterfaceOrdinalName(vmiNetworks)
if multusDefaultNetwork := vmispec.LookUpDefaultNetwork(vmiNetworks); multusDefaultNetwork != nil {
networkNameSchemeMap[multusDefaultNetwork.Name] = PrimaryPodInterfaceName
}
return networkNameSchemeMap
}
// OrdinalPodInterfaceName returns the ordinal interface name for the given network name.
// It reuses CreateOrdinalNetworkNameScheme to look up the target interface name.
func OrdinalPodInterfaceName(name string, networks []v1.Network) string {
networkNameSchemeMap := CreateOrdinalNetworkNameScheme(networks)
if ordinalName, exist := networkNameSchemeMap[name]; exist {
return ordinalName
}
return ""
}
func mapMultusNonDefaultNetworksToPodInterfaceOrdinalName(networks []v1.Network) map[string]string {
networkNameSchemeMap := map[string]string{}
for i, network := range vmispec.FilterMultusNonDefaultNetworks(networks) {
networkNameSchemeMap[network.Name] = generateOrdinalInterfaceName(i + 1)
}
return networkNameSchemeMap
}
func generateOrdinalInterfaceName(idx int) string {
const ordinalIfacePrefix = "net"
return fmt.Sprintf("%s%d", ordinalIfacePrefix, idx)
}
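// Illustrative sketch (hypothetical names): for networks [default(pod), "blue", "red"],
// CreateOrdinalNetworkNameScheme yields
// {"default": "eth0", "blue": "net1", "red": "net2"}.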
// CreateFromNetworkStatuses creates a mapping of network name to pod interface name
// based on the given VMI spec networks and pod network statuses.
// If the pod network status list has at least one interface with an ordinal interface name,
// the function returns an ordinal network name scheme.
func CreateFromNetworkStatuses(networks []v1.Network, networkStatuses []networkv1.NetworkStatus) map[string]string {
if PodHasOrdinalInterfaceName(networkStatuses) {
return CreateOrdinalNetworkNameScheme(networks)
}
return CreateHashedNetworkNameScheme(networks)
}
// PodHasOrdinalInterfaceName checks if the given pod network statuses include at least one pod interface with an ordinal name.
func PodHasOrdinalInterfaceName(networkStatuses []networkv1.NetworkStatus) bool {
for _, networkStatus := range networkStatuses {
if OrdinalSecondaryInterfaceName(networkStatus.Interface) {
return true
}
}
return false
}
// OrdinalSecondaryInterfaceName checks whether the given name follows the ordinal
// name scheme (e.g. net1, net2, ...).
// The primary iface name (eth0) is treated as a non-ordinal interface name.
func OrdinalSecondaryInterfaceName(name string) bool {
const ordinalIfaceNameRegex = `^net\d+$`
match, err := regexp.MatchString(ordinalIfaceNameRegex, name)
if err != nil {
return false
}
return match
}
func UpdatePrimaryPodIfaceNameFromVMIStatus(
podIfaceNamesByNetworkName map[string]string,
networks []v1.Network,
ifaceStatuses []v1.VirtualMachineInstanceNetworkInterface,
) map[string]string {
primaryNetwork := vmispec.LookUpDefaultNetwork(networks)
if primaryNetwork == nil {
return podIfaceNamesByNetworkName
}
primaryIfaceStatus := vmispec.LookupInterfaceStatusByName(ifaceStatuses, primaryNetwork.Name)
if primaryIfaceStatus == nil || primaryIfaceStatus.PodInterfaceName == "" {
return podIfaceNamesByNetworkName
}
updatedPodIfaceNamesByNetworkName := maps.Clone(podIfaceNamesByNetworkName)
updatedPodIfaceNamesByNetworkName[primaryNetwork.Name] = primaryIfaceStatus.PodInterfaceName
return updatedPodIfaceNamesByNetworkName
}
func HasOrdinalSecondaryIfaces(
networks []v1.Network,
ifaceStatuses []v1.VirtualMachineInstanceNetworkInterface,
) bool {
secondaryNets := vmispec.FilterMultusNonDefaultNetworks(networks)
if len(secondaryNets) == 0 {
return false
}
ifaceStatusesByName := vmispec.IndexInterfaceStatusByName(ifaceStatuses, nil)
for _, net := range secondaryNets {
ifaceStatus, ok := ifaceStatusesByName[net.Name]
if ok && ifaceStatus.PodInterfaceName != "" {
return OrdinalSecondaryInterfaceName(ifaceStatus.PodInterfaceName)
}
}
// Naming scheme is unknown - therefore assume the most conservative option:
// This VMI has a pod which uses an ordinal network naming scheme.
return true
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package netbinding
import (
k8scorev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
v1 "kubevirt.io/api/core/v1"
)
type MemoryCalculator struct{}
func (mc MemoryCalculator) Calculate(
vmi *v1.VirtualMachineInstance,
registeredPlugins map[string]v1.InterfaceBindingPlugin,
) resource.Quantity {
return sumPluginsMemoryRequests(
filterUniquePlugins(vmi.Spec.Domain.Devices.Interfaces, registeredPlugins),
)
}
func filterUniquePlugins(interfaces []v1.Interface, registeredPlugins map[string]v1.InterfaceBindingPlugin) []v1.InterfaceBindingPlugin {
var uniquePlugins []v1.InterfaceBindingPlugin
uniquePluginsSet := map[string]struct{}{}
for _, iface := range interfaces {
if iface.Binding == nil {
continue
}
pluginName := iface.Binding.Name
if _, seen := uniquePluginsSet[pluginName]; seen {
continue
}
plugin, exists := registeredPlugins[pluginName]
if !exists {
continue
}
uniquePluginsSet[pluginName] = struct{}{}
uniquePlugins = append(uniquePlugins, plugin)
}
return uniquePlugins
}
func sumPluginsMemoryRequests(uniquePlugins []v1.InterfaceBindingPlugin) resource.Quantity {
result := resource.Quantity{}
for _, plugin := range uniquePlugins {
if plugin.ComputeResourceOverhead == nil {
continue
}
requests := plugin.ComputeResourceOverhead.Requests
if requests == nil {
continue
}
result.Add(requests[k8scorev1.ResourceMemory])
}
return result
}
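// Illustrative sketch (hypothetical values): two interfaces using the same
// binding plugin with a 200Mi memory overhead request contribute 200Mi only
// once, since plugins are deduplicated before summing.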
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package netbinding
import (
"fmt"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/hooks"
)
func NetBindingPluginSidecarList(vmi *v1.VirtualMachineInstance, config *v1.KubeVirtConfiguration) (hooks.HookSidecarList, error) {
var pluginSidecars hooks.HookSidecarList
netbindingPluginSidecars, err := netBindingPluginSidecar(vmi, config)
if err != nil {
return nil, err
}
pluginSidecars = append(pluginSidecars, netbindingPluginSidecars...)
return pluginSidecars, nil
}
func netBindingPluginSidecar(vmi *v1.VirtualMachineInstance, config *v1.KubeVirtConfiguration) (hooks.HookSidecarList, error) {
var pluginSidecars hooks.HookSidecarList
bindingByName := map[string]v1.InterfaceBindingPlugin{}
for _, iface := range vmi.Spec.Domain.Devices.Interfaces {
if iface.Binding != nil {
var exist bool
var pluginInfo v1.InterfaceBindingPlugin
if config.NetworkConfiguration != nil && config.NetworkConfiguration.Binding != nil {
pluginInfo, exist = config.NetworkConfiguration.Binding[iface.Binding.Name]
bindingByName[iface.Binding.Name] = pluginInfo
}
if !exist {
return nil, fmt.Errorf("couldn't find configuration for network binding: %s", iface.Binding.Name)
}
}
}
for _, pluginInfo := range bindingByName {
if pluginInfo.SidecarImage != "" {
pluginSidecars = append(pluginSidecars, hooks.HookSidecar{
Image: pluginInfo.SidecarImage,
ImagePullPolicy: config.ImagePullPolicy,
DownwardAPI: pluginInfo.DownwardAPI,
})
}
}
return pluginSidecars, nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package netmachinery
import (
"net"
)
// NextIP increments the IP address by one (in-place).
func NextIP(ip net.IP) {
for j := len(ip) - 1; j >= 0; j-- {
ip[j]++
if ip[j] > 0 {
break
}
}
}
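// Illustrative sketch:
//
//	ip := net.ParseIP("10.0.2.255").To4()
//	NextIP(ip) // ip is now 10.0.3.0; the carry propagates across octets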
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package vmispec
import (
"fmt"
v1 "kubevirt.io/api/core/v1"
)
type netClusterConfigurer interface {
GetDefaultNetworkInterface() string
IsBridgeInterfaceOnPodNetworkEnabled() bool
}
func SetDefaultNetworkInterface(config netClusterConfigurer, spec *v1.VirtualMachineInstanceSpec) error {
if autoAttach := spec.Domain.Devices.AutoattachPodInterface; autoAttach != nil && !*autoAttach {
return nil
}
// Override only when nothing is specified
if len(spec.Networks) != 0 || len(spec.Domain.Devices.Interfaces) != 0 {
return nil
}
switch v1.NetworkInterfaceType(config.GetDefaultNetworkInterface()) {
case v1.BridgeInterface:
if !config.IsBridgeInterfaceOnPodNetworkEnabled() {
return fmt.Errorf("bridge interface is not enabled in kubevirt-config")
}
spec.Domain.Devices.Interfaces = []v1.Interface{*v1.DefaultBridgeNetworkInterface()}
case v1.MasqueradeInterface:
spec.Domain.Devices.Interfaces = []v1.Interface{*v1.DefaultMasqueradeNetworkInterface()}
case v1.DeprecatedSlirpInterface:
return fmt.Errorf("slirp interface is deprecated as of v1.3")
}
spec.Networks = []v1.Network{*v1.DefaultPodNetwork()}
return nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package vmispec
import v1 "kubevirt.io/api/core/v1"
// RequiresVirtioNetDevice checks whether a VMI requires the presence of the "virtio" net device.
// This happens when the VMI wants to use a "virtio" network interface, and software emulation is disallowed.
func RequiresVirtioNetDevice(vmi *v1.VirtualMachineInstance, allowEmulation bool) bool {
return hasVirtioIface(vmi) && !allowEmulation
}
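// RequiresTunDevice checks whether a VMI requires the presence of the "tun" device.
// This is the case when interfaces are specified, or when pod interface
// auto-attachment is enabled (explicitly or by default).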
func RequiresTunDevice(vmi *v1.VirtualMachineInstance) bool {
return (len(vmi.Spec.Domain.Devices.Interfaces) > 0) ||
(vmi.Spec.Domain.Devices.AutoattachPodInterface == nil) ||
(*vmi.Spec.Domain.Devices.AutoattachPodInterface)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*/
package vmispec
import "strings"
const (
InfoSourceDomain string = "domain"
InfoSourceGuestAgent string = "guest-agent"
InfoSourceMultusStatus string = "multus-status"
InfoSourceDomainAndGA string = InfoSourceDomain + ", " + InfoSourceGuestAgent
separator = ", "
)
func AddInfoSource(infoSourceData, name string) string {
var infoSources []string
if infoSourceData != "" {
infoSources = strings.Split(infoSourceData, separator)
}
for _, infoSourceName := range infoSources {
if infoSourceName == name {
return infoSourceData
}
}
infoSources = append(infoSources, name)
return NewInfoSource(infoSources...)
}
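// Illustrative sketch:
//
//	AddInfoSource("domain", "guest-agent")              // => "domain, guest-agent"
//	AddInfoSource("domain, guest-agent", "guest-agent") // => unchanged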
func RemoveInfoSource(infoSourceData, name string) string {
var newInfoSources []string
infoSources := strings.Split(infoSourceData, separator)
for _, infoSourceName := range infoSources {
if infoSourceName != name {
newInfoSources = append(newInfoSources, infoSourceName)
}
}
return NewInfoSource(newInfoSources...)
}
func ContainsInfoSource(infoSourceData, name string) bool {
infoSources := strings.Split(infoSourceData, separator)
for _, infoSourceName := range infoSources {
if infoSourceName == name {
return true
}
}
return false
}
func NewInfoSource(names ...string) string {
return strings.Join(names, separator)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package vmispec
import (
"fmt"
v1 "kubevirt.io/api/core/v1"
)
func FilterSRIOVInterfaces(ifaces []v1.Interface) []v1.Interface {
var sriovIfaces []v1.Interface
for _, iface := range ifaces {
if iface.SRIOV != nil {
sriovIfaces = append(sriovIfaces, iface)
}
}
return sriovIfaces
}
func SRIOVInterfaceExist(ifaces []v1.Interface) bool {
for _, iface := range ifaces {
if iface.SRIOV != nil {
return true
}
}
return false
}
func FilterInterfacesSpec(ifaces []v1.Interface, predicate func(i v1.Interface) bool) []v1.Interface {
var filteredIfaces []v1.Interface
for _, iface := range ifaces {
if predicate(iface) {
filteredIfaces = append(filteredIfaces, iface)
}
}
return filteredIfaces
}
func VerifyVMIMigratable(vmi *v1.VirtualMachineInstance, bindingPlugins map[string]v1.InterfaceBindingPlugin) error {
ifaces := vmi.Spec.Domain.Devices.Interfaces
if len(ifaces) == 0 {
return nil
}
_, allowPodBridgeNetworkLiveMigration := vmi.Annotations[v1.AllowPodBridgeNetworkLiveMigrationAnnotation]
if allowPodBridgeNetworkLiveMigration && isPodNetworkWithBridgeBindingInterface(vmi.Spec.Networks, ifaces) {
return nil
}
if IsPodNetworkWithMasqueradeBindingInterface(vmi.Spec.Networks, ifaces) ||
IsPodNetworkWithMigratableBindingPlugin(vmi.Spec.Networks, ifaces, bindingPlugins) {
return nil
}
return fmt.Errorf(
"cannot migrate VMI which does not use masquerade, bridge with %s VM annotation or a migratable plugin to connect to the pod network",
v1.AllowPodBridgeNetworkLiveMigrationAnnotation,
)
}
func IsPodNetworkWithMigratableBindingPlugin(
networks []v1.Network,
ifaces []v1.Interface,
bindingPlugins map[string]v1.InterfaceBindingPlugin,
) bool {
if podNetwork := LookupPodNetwork(networks); podNetwork != nil {
if podInterface := LookupInterfaceByName(ifaces, podNetwork.Name); podInterface != nil {
if podInterface.Binding != nil {
binding, exist := bindingPlugins[podInterface.Binding.Name]
return exist && binding.Migration != nil
}
}
}
return false
}
func IsPodNetworkWithMasqueradeBindingInterface(networks []v1.Network, ifaces []v1.Interface) bool {
if podNetwork := LookupPodNetwork(networks); podNetwork != nil {
if podInterface := LookupInterfaceByName(ifaces, podNetwork.Name); podInterface != nil {
return podInterface.Masquerade != nil
}
}
return true
}
func isPodNetworkWithBridgeBindingInterface(networks []v1.Network, ifaces []v1.Interface) bool {
if podNetwork := LookupPodNetwork(networks); podNetwork != nil {
if podInterface := LookupInterfaceByName(ifaces, podNetwork.Name); podInterface != nil {
return podInterface.Bridge != nil
}
}
return true
}
func LookupInterfaceStatusByMac(
interfaces []v1.VirtualMachineInstanceNetworkInterface,
macAddress string,
) *v1.VirtualMachineInstanceNetworkInterface {
for index := range interfaces {
if interfaces[index].MAC == macAddress {
return &interfaces[index]
}
}
return nil
}
func LookupInterfaceStatusByName(
interfaces []v1.VirtualMachineInstanceNetworkInterface,
name string,
) *v1.VirtualMachineInstanceNetworkInterface {
for index := range interfaces {
if interfaces[index].Name == name {
return &interfaces[index]
}
}
return nil
}
func IndexInterfaceSpecByName(interfaces []v1.Interface) map[string]v1.Interface {
ifacesByName := map[string]v1.Interface{}
for _, ifaceSpec := range interfaces {
ifacesByName[ifaceSpec.Name] = ifaceSpec
}
return ifacesByName
}
func LookupInterfaceByName(ifaces []v1.Interface, name string) *v1.Interface {
for idx := range ifaces {
if ifaces[idx].Name == name {
return &ifaces[idx]
}
}
return nil
}
func IndexInterfaceStatusByName(
interfaces []v1.VirtualMachineInstanceNetworkInterface,
p func(ifaceStatus v1.VirtualMachineInstanceNetworkInterface) bool,
) map[string]v1.VirtualMachineInstanceNetworkInterface {
indexedInterfaceStatus := map[string]v1.VirtualMachineInstanceNetworkInterface{}
for _, iface := range interfaces {
if p == nil || p(iface) {
indexedInterfaceStatus[iface.Name] = iface
}
}
return indexedInterfaceStatus
}
func FilterInterfacesByNetworks(interfaces []v1.Interface, networks []v1.Network) []v1.Interface {
var ifaces []v1.Interface
ifacesByName := IndexInterfaceSpecByName(interfaces)
for _, net := range networks {
if iface, exists := ifacesByName[net.Name]; exists {
ifaces = append(ifaces, iface)
}
}
return ifaces
}
func BindingPluginNetworkWithDeviceInfoExist(ifaces []v1.Interface, bindingPlugins map[string]v1.InterfaceBindingPlugin) bool {
for _, iface := range ifaces {
if HasBindingPluginDeviceInfo(iface, bindingPlugins) {
return true
}
}
return false
}
func HasBindingPluginDeviceInfo(iface v1.Interface, bindingPlugins map[string]v1.InterfaceBindingPlugin) bool {
if iface.Binding != nil {
binding, exist := bindingPlugins[iface.Binding.Name]
return exist && binding.DownwardAPI == v1.DeviceInfo
}
return false
}
// hasVirtioIface checks whether a VMI references at least one "virtio" network interface.
// Note that the reference can be explicit or implicit (an unspecified NIC model defaults to "virtio").
func hasVirtioIface(vmi *v1.VirtualMachineInstance) bool {
for _, iface := range vmi.Spec.Domain.Devices.Interfaces {
if iface.Model == "" || iface.Model == v1.VirtIO {
return true
}
}
return false
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package vmispec
import v1 "kubevirt.io/api/core/v1"
func LookupNetworkByName(networks []v1.Network, name string) *v1.Network {
for i := range networks {
if networks[i].Name == name {
return &networks[i]
}
}
return nil
}
func LookupPodNetwork(networks []v1.Network) *v1.Network {
for _, network := range networks {
if network.Pod != nil {
net := network
return &net
}
}
return nil
}
func FilterMultusNonDefaultNetworks(networks []v1.Network) []v1.Network {
return FilterNetworksSpec(networks, IsSecondaryMultusNetwork)
}
func FilterNetworksSpec(nets []v1.Network, predicate func(i v1.Network) bool) []v1.Network {
var filteredNets []v1.Network
for _, net := range nets {
if predicate(net) {
filteredNets = append(filteredNets, net)
}
}
return filteredNets
}
func LookUpDefaultNetwork(networks []v1.Network) *v1.Network {
for i, network := range networks {
if !IsSecondaryMultusNetwork(network) {
return &networks[i]
}
}
return nil
}
func IsSecondaryMultusNetwork(net v1.Network) bool {
return net.Multus != nil && !net.Multus.Default
}
func IndexNetworkSpecByName(networks []v1.Network) map[string]v1.Network {
indexedNetworks := map[string]v1.Network{}
for _, network := range networks {
indexedNetworks[network.Name] = network
}
return indexedNetworks
}
func FilterNetworksByInterfaces(networks []v1.Network, interfaces []v1.Interface) []v1.Network {
var nets []v1.Network
networksByName := IndexNetworkSpecByName(networks)
for _, iface := range interfaces {
if net, exists := networksByName[iface.Name]; exists {
nets = append(nets, net)
}
}
return nets
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package vmliveupdate
import (
"reflect"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/network/vmispec"
)
// IsRestartRequired checks whether changes in network-related fields require a restart
// of the VM in order to be applied.
func IsRestartRequired(vm *v1.VirtualMachine, vmi *v1.VirtualMachineInstance) bool {
desiredIfaces := vm.Spec.Template.Spec.Domain.Devices.Interfaces
currentIfaces := vmi.Spec.Domain.Devices.Interfaces
desiredNets := vm.Spec.Template.Spec.Networks
currentNets := vmi.Spec.Networks
return shouldIfacesChangeRequireRestart(desiredIfaces, currentIfaces) ||
shouldNetsChangeRequireRestart(desiredNets, currentNets)
}
func shouldIfacesChangeRequireRestart(desiredIfaces, currentIfaces []v1.Interface) bool {
desiredIfacesByName := vmispec.IndexInterfaceSpecByName(desiredIfaces)
currentIfacesByName := vmispec.IndexInterfaceSpecByName(currentIfaces)
return haveCurrentIfacesBeenRemoved(desiredIfacesByName, currentIfacesByName) ||
haveCurrentIfacesChanged(desiredIfacesByName, currentIfacesByName)
}
func shouldNetsChangeRequireRestart(desiredNets, currentNets []v1.Network) bool {
isPodNetworkInDesiredNets := vmispec.LookupPodNetwork(desiredNets) != nil
isPodNetworkInCurrentNets := vmispec.LookupPodNetwork(currentNets) != nil
if isPodNetworkInDesiredNets && !isPodNetworkInCurrentNets {
return true
}
desiredNetsByName := vmispec.IndexNetworkSpecByName(desiredNets)
currentNetsByName := vmispec.IndexNetworkSpecByName(currentNets)
return haveCurrentNetsBeenRemoved(desiredNetsByName, currentNetsByName) ||
haveCurrentNetsChanged(desiredNetsByName, currentNetsByName)
}
// haveCurrentIfacesBeenRemoved checks if interfaces existing in the VMI spec were removed
// from the VM spec without using the hotunplug flow.
func haveCurrentIfacesBeenRemoved(desiredIfacesByName, currentIfacesByName map[string]v1.Interface) bool {
for currentIfaceName := range currentIfacesByName {
if _, desiredIfaceExists := desiredIfacesByName[currentIfaceName]; !desiredIfaceExists {
return true
}
}
return false
}
func haveCurrentIfacesChanged(desiredIfacesByName, currentIfacesByName map[string]v1.Interface) bool {
for currentIfaceName, currentIface := range currentIfacesByName {
desiredIface := desiredIfacesByName[currentIfaceName]
if !areNormalizedIfacesEqual(desiredIface, currentIface) {
return true
}
}
return false
}
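// areNormalizedIfacesEqual compares two interfaces while ignoring the State
// field, so that changing only an interface's desired link state (up/down)
// does not require a VM restart.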
func areNormalizedIfacesEqual(iface1, iface2 v1.Interface) bool {
normalizedIface1 := iface1.DeepCopy()
normalizedIface1.State = ""
normalizedIface2 := iface2.DeepCopy()
normalizedIface2.State = ""
return reflect.DeepEqual(normalizedIface1, normalizedIface2)
}
// haveCurrentNetsBeenRemoved checks if networks existing in the VMI spec were removed
// from the VM spec without using the hotunplug flow.
func haveCurrentNetsBeenRemoved(desiredNetsByName, currentNetsByName map[string]v1.Network) bool {
for currentNetName := range currentNetsByName {
if _, desiredNetExists := desiredNetsByName[currentNetName]; !desiredNetExists {
return true
}
}
return false
}
func haveCurrentNetsChanged(desiredNetsByName, currentNetsByName map[string]v1.Network) bool {
for currentNetName, currentNet := range currentNetsByName {
desiredNet := desiredNetsByName[currentNetName]
if !reflect.DeepEqual(desiredNet, currentNet) {
return true
}
}
return false
}
package disk
import (
"encoding/json"
"fmt"
"os/exec"
)
type DiskInfo struct {
Format string `json:"format"`
BackingFile string `json:"backing-filename"`
ActualSize int64 `json:"actual-size"`
VirtualSize int64 `json:"virtual-size"`
}
const (
QEMUIMGPath = "/usr/bin/qemu-img"
)
func GetDiskInfo(imagePath string) (*DiskInfo, error) {
// #nosec No risk for attacker injection. Only get information about an image
args := []string{"info", imagePath, "--output", "json"}
out, err := exec.Command(QEMUIMGPath, args...).Output()
if err != nil {
// With Stderr left unset, exec captures stderr in ExitError.Stderr;
// reading a StderrPipe after Output() returns is unreliable, since Wait closes the pipe.
if e, ok := err.(*exec.ExitError); ok && len(e.Stderr) > 0 {
return nil, fmt.Errorf("failed to invoke qemu-img: %v: %s", err, e.Stderr)
}
return nil, fmt.Errorf("failed to invoke qemu-img: %v", err)
}
info := &DiskInfo{}
if err := json.Unmarshal(out, info); err != nil {
return nil, fmt.Errorf("failed to parse disk info: %v", err)
}
return info, nil
}
package disk
import (
"encoding/json"
"fmt"
"os/exec"
"kubevirt.io/client-go/log"
)
const (
DiskSourceFallbackPath = "/disk"
)
func VerifyQCOW2(diskInfo *DiskInfo) error {
if diskInfo.Format != "qcow2" {
return fmt.Errorf("expected a disk format of qcow2, but got '%v'", diskInfo.Format)
}
if diskInfo.BackingFile != "" {
return fmt.Errorf("expected no backing file, but found %v", diskInfo.BackingFile)
}
return nil
}
func VerifyImage(diskInfo *DiskInfo) error {
switch diskInfo.Format {
case "qcow2":
return VerifyQCOW2(diskInfo)
case "raw":
return nil
default:
return fmt.Errorf("unsupported image format: %v", diskInfo.Format)
}
}
func GetDiskInfoWithValidation(imagePath string, diskMemoryLimitBytes int64) (*DiskInfo, error) {
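// Run qemu-img under bash with a 10s CPU-time limit and a virtual-memory cap
// (ulimit -v takes KiB, hence the division by 1024) to guard against maliciously
// crafted images.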
cmd := exec.Command("bash", "-c", fmt.Sprintf("ulimit -t %d && ulimit -v %d && %v info %v --output json", 10, diskMemoryLimitBytes/1024, QEMUIMGPath, imagePath))
log.Log.V(3).Infof("fetching image info. running command: %s", cmd.String())
out, err := cmd.Output()
if err != nil {
if e, ok := err.(*exec.ExitError); ok {
if len(e.Stderr) > 0 {
return nil, fmt.Errorf("failed to invoke qemu-img: %v: '%v'", err, string(e.Stderr))
}
}
return nil, fmt.Errorf("failed to invoke qemu-img: %v", err)
}
info := &DiskInfo{}
err = json.Unmarshal(out, info)
if err != nil {
return nil, fmt.Errorf("failed to parse disk info: %v", err)
}
return info, nil
}
/*
Copyright 2016 The Kubernetes Authors.
Copyright 2021 The KubeVirt Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file was originally copied from https://github.com/kubernetes/kubernetes/blob/e0a22acaa0c62f3e6f9dd37ab2a4e7d960528edc/pkg/util/filesystem/defaultfs.go
*/
package fs
import (
"io/fs"
"os"
"path/filepath"
"strings"
"time"
)
// DefaultFs implements Filesystem using same-named functions from "os" and "path/filepath"
type DefaultFs struct {
root string
}
var _ Fs = &DefaultFs{}
// New returns a DefaultFs operating directly on the host filesystem (empty root)
func New() *DefaultFs {
return &DefaultFs{}
}
// NewWithRootPath returns a DefaultFs with all operations rooted at rootPath, useful for unit tests
func NewWithRootPath(rootPath string) *DefaultFs {
return &DefaultFs{
root: rootPath,
}
}
func (fs *DefaultFs) prefix(path string) string {
if len(fs.root) == 0 {
return path
}
return filepath.Join(fs.root, path)
}
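// Illustrative sketch (hypothetical paths):
//
//	NewWithRootPath("/proc/1/root").ReadFile("/etc/hostname")
//	// reads "/proc/1/root/etc/hostname" on the host filesystem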
// Stat via os.Stat
func (fs *DefaultFs) Stat(name string) (os.FileInfo, error) {
return os.Stat(fs.prefix(name))
}
// Create via os.Create
func (fs *DefaultFs) Create(name string) (File, error) {
file, err := os.Create(fs.prefix(name))
if err != nil {
return nil, err
}
return &defaultFile{file}, nil
}
// Rename via os.Rename
func (fs *DefaultFs) Rename(oldpath, newpath string) error {
if !strings.HasPrefix(oldpath, fs.root) {
oldpath = fs.prefix(oldpath)
}
if !strings.HasPrefix(newpath, fs.root) {
newpath = fs.prefix(newpath)
}
return os.Rename(oldpath, newpath)
}
// MkdirAll via os.MkdirAll
func (fs *DefaultFs) MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(fs.prefix(path), perm)
}
// Chtimes via os.Chtimes
func (fs *DefaultFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
return os.Chtimes(fs.prefix(name), atime, mtime)
}
// RemoveAll via os.RemoveAll
func (fs *DefaultFs) RemoveAll(path string) error {
return os.RemoveAll(fs.prefix(path))
}
// Remove via os.Remove
func (fs *DefaultFs) Remove(name string) error {
return os.Remove(fs.prefix(name))
}
// ReadFile via os.ReadFile
func (fs *DefaultFs) ReadFile(filename string) ([]byte, error) {
return os.ReadFile(fs.prefix(filename))
}
// WriteFile via os.WriteFile
func (fs *DefaultFs) WriteFile(filename string, data []byte, perm fs.FileMode) error {
return os.WriteFile(fs.prefix(filename), data, perm)
}
// Walk via filepath.Walk
func (fs *DefaultFs) Walk(root string, walkFn filepath.WalkFunc) error {
return filepath.Walk(fs.prefix(root), walkFn)
}
// defaultFile implements File using same-named functions from "os"
type defaultFile struct {
file *os.File
}
// Name via os.File.Name
func (file *defaultFile) Name() string {
return file.file.Name()
}
// Write via os.File.Write
func (file *defaultFile) Write(b []byte) (n int, err error) {
return file.file.Write(b)
}
// Sync via os.File.Sync
func (file *defaultFile) Sync() error {
return file.file.Sync()
}
// Close via os.File.Close
func (file *defaultFile) Close() error {
return file.file.Close()
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package pointer
func P[T any](t T) *T {
return &t
}
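// Illustrative sketch: P is handy for optional struct fields that take pointers:
//
//	autoAttach := pointer.P(false) // *bool without a temporary variable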
package safepath
import (
"container/list"
"errors"
"fmt"
"net"
"os"
"path/filepath"
"strings"
"syscall"
"kubevirt.io/kubevirt/pkg/unsafepath"
"golang.org/x/sys/unix"
)
// JoinAndResolveWithRelativeRoot joins an absolute rootBase path with
// additional elements which have to be kept below that base.
// Relative and absolute links will be resolved relative to the provided rootBase
// and cannot escape it.
func JoinAndResolveWithRelativeRoot(rootBase string, elems ...string) (*Path, error) {
// ensure that rootBase is absolute
if !filepath.IsAbs(rootBase) {
return nil, fmt.Errorf("basepath is not absolute: %q", rootBase)
}
path := pathRoot
fifo := newLimitedFifo(256)
for i := len(elems) - 1; i >= 0; i-- {
if err := fifo.push(strings.Split(filepath.Clean(elems[i]), pathSeparator)); err != nil {
return nil, err
}
}
for !fifo.empty() {
child := fifo.pop()
var link string
var err error
path, link, err = advance(rootBase, path, child)
if err != nil {
return nil, err
}
if link != "" {
if err := fifo.push(strings.Split(link, pathSeparator)); err != nil {
return nil, err
}
}
}
// Assert that the result is indeed a clean path in the expected format
// at this point in time.
finalPath := newPath(rootBase, path)
fd, err := OpenAtNoFollow(finalPath)
if err != nil {
return nil, err
}
_ = fd.Close()
return finalPath, nil
}
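// Illustrative sketch (hypothetical paths): resolving elements below a root,
// where any symlink encountered is resolved but kept within the base:
//
//	p, err := JoinAndResolveWithRelativeRoot("/var/lib/kubevirt", "disks", "vm1.img")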
type fifo struct {
ops uint
store *list.List
maxOps uint
}
func (f *fifo) push(pathElements []string) error {
for i := len(pathElements) - 1; i >= 0; i-- {
if f.ops > f.maxOps {
return fmt.Errorf("more than %v path elements evaluated", f.maxOps)
}
if pathElements[i] == "" {
continue
}
f.ops++
f.store.PushFront(pathElements[i])
}
return nil
}
func (f *fifo) pop() string {
if val := f.store.Front(); val != nil {
f.store.Remove(val)
return val.Value.(string)
}
return ""
}
func (f *fifo) empty() bool {
return f.store.Len() == 0
}
// newLimitedFifo creates a fifo with a maximum enqueue limit to
// avoid abuse on filepath operations.
func newLimitedFifo(maxOps uint) *fifo {
return &fifo{
store: list.New(),
maxOps: maxOps,
}
}
// OpenAtNoFollow safely opens a file descriptor to a path relative to
// rootBase. Any symlink encountered will be treated as invalid and the operation will be aborted.
// This works best together with a path first resolved with JoinAndResolveWithRelativeRoot,
// which can resolve relative paths and symlinks.
func OpenAtNoFollow(path *Path) (file *File, err error) {
fd, err := open(path.rootBase)
if err != nil {
return nil, fmt.Errorf("failed opening path %v: %w", path, err)
}
for _, child := range strings.Split(filepath.Clean(path.relativePath), pathSeparator) {
if child == "" {
continue
}
newfd, err := openat(fd, child)
_ = syscall.Close(fd) // always close the parent after the lookup
if err != nil {
return nil, fmt.Errorf("failed opening %s for path %v: %w", child, path, err)
}
fd = newfd
}
return &File{fd: fd, path: path}, nil
}
func ChmodAtNoFollow(path *Path, mode os.FileMode) error {
f, err := OpenAtNoFollow(path)
if err != nil {
return err
}
defer f.Close()
return os.Chmod(f.SafePath(), mode)
}
func ChownAtNoFollow(path *Path, uid, gid int) error {
f, err := OpenAtNoFollow(path)
if err != nil {
return err
}
defer f.Close()
return os.Chown(f.SafePath(), uid, gid)
}
func ChpermAtNoFollow(path *Path, uid, gid int, mode os.FileMode) error {
// First set the ownership, so that nobody can change the file mode back
// after we set it. This is necessary if the file was somehow created with
// the wrong owners, possibly with malicious intent.
if err := ChownAtNoFollow(path, uid, gid); err != nil {
return err
}
if err := ChmodAtNoFollow(path, mode); err != nil {
return err
}
return nil
}
func MkdirAtNoFollow(path *Path, dirName string, mode os.FileMode) error {
if err := isSingleElement(dirName); err != nil {
return err
}
f, err := OpenAtNoFollow(path)
if err != nil {
return err
}
defer f.Close()
if err := unix.Mkdirat(f.fd, dirName, uint32(mode)); err != nil {
return fmt.Errorf("failed making the directory %v: %w", path, err)
}
return nil
}
// TouchAtNoFollow safely touches a file relative to
// rootBase. The additional elements form the relative path. Any symlink
// encountered will be treated as invalid and the operation will be aborted.
// This works best together with a path first resolved with JoinAndResolveWithRelativeRoot
// which can resolve relative paths to their real path without symlinks.
// If the target file exists already, the function will fail.
func TouchAtNoFollow(path *Path, fileName string, mode os.FileMode) (err error) {
if err := isSingleElement(fileName); err != nil {
return err
}
parent, err := OpenAtNoFollow(path)
if err != nil {
return err
}
defer parent.Close()
fd, err := touchat(parent.fd, fileName, uint32(mode))
if err != nil {
return err
}
_ = syscall.Close(fd)
return nil
}
func MknodAtNoFollow(path *Path, fileName string, mode os.FileMode, dev uint64) (err error) {
if err := isSingleElement(fileName); err != nil {
return err
}
parent, err := OpenAtNoFollow(path)
if err != nil {
return err
}
defer parent.Close()
return mknodat(parent.fd, fileName, uint32(mode), dev)
}
func StatAtNoFollow(path *Path) (os.FileInfo, error) {
pathFd, err := OpenAtNoFollow(path)
if err != nil {
return nil, err
}
defer pathFd.Close()
return os.Stat(pathFd.SafePath())
}
func GetxattrNoFollow(path *Path, attr string) ([]byte, error) {
var ret []byte
pathFd, err := OpenAtNoFollow(path)
if err != nil {
return nil, err
}
defer pathFd.Close()
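// First call with a nil buffer to learn the attribute size, then fetch the
// value into a correctly sized buffer (the usual two-call getxattr pattern).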
size, err := syscall.Getxattr(pathFd.SafePath(), attr, ret)
if err != nil {
return nil, err
}
ret = make([]byte, size)
_, err = syscall.Getxattr(pathFd.SafePath(), attr, ret)
if err != nil {
return nil, err
}
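// Drop the final byte: for string-valued attributes such as SELinux labels
// the kernel returns a trailing NUL terminator (assumption: callers only
// read such string-typed attributes).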
return ret[:len(ret)-1], nil
}
type File struct {
fd int
path *Path
}
func (f *File) Close() error {
return syscall.Close(f.fd)
}
func (f *File) String() string {
return f.Path().String()
}
// SafePath returns a path pointing to the associated file descriptor.
// It is safe to reuse this path without additional checks. The kernel
// will ensure that this path always points to the resolved file.
// To operate on the file just use os.Open and related calls.
func (f *File) SafePath() string {
return path(f.fd)
}
func (f *File) Path() *Path {
return f.path
}
// Path is a path which was, at the time of its creation, a real path
// resolved relative to its rootBase and free of symlinks.
type Path struct {
rootBase string
relativePath string
}
// Raw returns an "unsafe" path. Its properties are not safe to use without certain precautions.
// It exposes no access functions. All access happens via functions in the "unsafepath" package.
func (p *Path) Raw() *unsafepath.Path {
return unsafepath.New(p.rootBase, p.relativePath)
}
func (p *Path) IsRoot() bool {
return unsafepath.UnsafeAbsolute(p.Raw()) == pathRoot
}
// AppendAndResolveWithRelativeRoot returns a new path with the passed elements resolved
// relative to the current absolute path.
func (p *Path) AppendAndResolveWithRelativeRoot(relativeRootElems ...string) (*Path, error) {
tmpPath, err := JoinAndResolveWithRelativeRoot(unsafepath.UnsafeAbsolute(p.Raw()), relativeRootElems...)
if err != nil {
return nil, err
}
newPath := newPath(p.rootBase, filepath.Join(p.relativePath, tmpPath.relativePath))
fd, err := OpenAtNoFollow(newPath)
if err != nil {
return nil, err
}
_ = fd.Close()
return newPath, nil
}
func (p *Path) String() string {
return fmt.Sprintf("root: %v, relative: %v", p.rootBase, p.relativePath)
}
// ExecuteNoFollow opens the file in question and provides the file descriptor path as safePath string.
// This safePath string can be (re)opened with normal os.* operations. The file descriptor path is
// managed by the kernel and there is no way to inject malicious symlinks.
func (p *Path) ExecuteNoFollow(callback func(safePath string) error) error {
f, err := OpenAtNoFollow(p)
if err != nil {
return err
}
defer f.Close()
return callback(f.SafePath())
}
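// exampleExecuteNoFollow is an illustrative sketch (not part of the original
// source): reading a file through its kernel-managed file-descriptor path,
// which cannot be swapped for a symlink between validation and use.
func exampleExecuteNoFollow(p *Path) ([]byte, error) {
var data []byte
err := p.ExecuteNoFollow(func(safePath string) error {
var readErr error
data, readErr = os.ReadFile(safePath) // safePath is /proc/self/fd/<n>
return readErr
})
return data, err
}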
// DirNoFollow returns the parent directory of the safepath.Path as safepath.Path.
func (p *Path) DirNoFollow() (*Path, error) {
if len(p.relativePath) == 0 {
return nil, fmt.Errorf("already at relative root, can't get parent")
}
newPath := newPath(p.rootBase, filepath.Dir(p.relativePath))
return newPath, nil
}
// Base returns the basename of the relative untrusted part of the safepath.
func (p *Path) Base() (string, error) {
if len(p.relativePath) == 0 {
return "", fmt.Errorf("already at relative root, can't get parent")
}
return filepath.Base(p.relativePath), nil
}
func newPath(rootBase, relativePath string) *Path {
return &Path{
rootBase: rootBase,
relativePath: filepath.Join("/", relativePath),
}
}
// NewFileNoFollow assumes that a real path to a file is given. It validates
// the path by doing the following checks:
// - ensure that the path is absolute
// - ensure that the path does not contain relative path elements
// - ensure that no symlinks are provided
//
// It will return the opened file which contains a link to a safe-to-use path
// to the file, which can't be tampered with. To operate on the file just use os.Open and related calls.
func NewFileNoFollow(path string) (*File, error) {
if filepath.Clean(path) != path || !filepath.IsAbs(path) {
return nil, fmt.Errorf("path %q must be absolute and must not contain relative elements", path)
}
p := newPath("/", path)
return OpenAtNoFollow(p)
}
// NewPathNoFollow is a convenience method to obtain a safepath.Path from a
// supposedly link-free path. If the path includes a symlink, the call fails.
func NewPathNoFollow(path string) (*Path, error) {
fd, err := NewFileNoFollow(path)
if err != nil {
return nil, err
}
defer fd.Close()
return fd.Path(), nil
}
// JoinNoFollow joins the root path with the given additional path.
// It fails if the additional path is not a real path, e.g. if it contains symlinks.
func JoinNoFollow(rootPath *Path, path string) (*Path, error) {
if filepath.Clean(path) != path || path == "" {
return nil, fmt.Errorf("path %q must not contain relative elements and must not be empty", path)
}
p := newPath(unsafepath.UnsafeAbsolute(rootPath.Raw()), path)
f, err := OpenAtNoFollow(p)
if err != nil {
return nil, err
}
return f.Path(), f.Close()
}
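// exampleJoinNoFollow is an illustrative sketch (not part of the original
// source): building a safepath.Path step by step; the concrete paths are
// hypothetical.
func exampleJoinNoFollow() (*Path, error) {
root, err := NewPathNoFollow("/var/run/kubevirt")
if err != nil {
return nil, err
}
// Fails if "sockets" does not exist or is reached through a symlink.
return JoinNoFollow(root, "sockets")
}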
func isSingleElement(path string) error {
cleanedPath := filepath.Clean(path)
if cleanedPath != path || strings.ContainsAny(path, pathSeparator) {
return fmt.Errorf("path %q must be a single non-relative path segment", path)
}
switch path {
case "", "..", ".":
return fmt.Errorf("path %q must be a single non-relative path segment", path)
default:
return nil
}
}
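// exampleIsSingleElement is an illustrative sketch (not part of the original
// source) of which inputs pass the single-element check.
func exampleIsSingleElement() {
_ = isSingleElement("disk.img") // ok: one clean path segment
_ = isSingleElement("a/b")      // rejected: contains a separator
_ = isSingleElement("..")       // rejected: relative element
_ = isSingleElement("")         // rejected: empty
}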
// UnlinkAtNoFollow allows deleting the specified file or directory (directory must be empty to succeed).
func UnlinkAtNoFollow(path *Path) error {
parent, err := path.DirNoFollow()
if err != nil {
return err
}
basename, err := path.Base()
if err != nil {
return err
}
info, err := StatAtNoFollow(path)
if err != nil {
return err
}
fd, err := OpenAtNoFollow(parent)
if err != nil {
return err
}
defer fd.Close()
options := 0
if info.IsDir() {
// if dir is empty we can delete it with AT_REMOVEDIR
options = unix.AT_REMOVEDIR
}
if err = unlinkat(fd.fd, basename, options); err != nil {
return fmt.Errorf("failed unlinking path %v: %w", path, err)
}
return nil
}
// ListenUnixNoFollow safely creates a socket in a user-owned path.
// Since there is no socketat syscall on unix, a safe delete is performed first,
// then the socket is created.
func ListenUnixNoFollow(socketDir *Path, socketName string) (net.Listener, error) {
if err := isSingleElement(socketName); err != nil {
return nil, err
}
addr, err := net.ResolveUnixAddr("unix", filepath.Join(unsafepath.UnsafeAbsolute(socketDir.Raw()), socketName))
if err != nil {
return nil, err
}
socketPath, err := JoinNoFollow(socketDir, socketName)
if err == nil {
// This ensures that we don't allow unlinking arbitrary files
if err := UnlinkAtNoFollow(socketPath); err != nil {
return nil, fmt.Errorf("failed unlinking socket %v: %w", socketPath, err)
}
} else if !errors.Is(err, os.ErrNotExist) {
return nil, err
}
listener, err := net.ListenUnix("unix", addr)
if err != nil {
return nil, err
}
// Ensure that the socket path is a real path. This does not completely
// remove the chance of the socket being created in the wrong place,
// but it makes it unlikely.
_, err = JoinNoFollow(socketDir, socketName)
if err != nil {
return nil, err
}
return listener, nil
}
//go:build linux
package safepath
import (
"fmt"
"io/fs"
"os"
"path/filepath"
"strings"
"syscall"
"golang.org/x/sys/unix"
)
const pathSeparator = string(os.PathSeparator)
const pathRoot = string(os.PathSeparator)
// advance will try to add the child to the parent. If the result is a relative symlink, it resolves it
// and returns the parent together with the new symlink. If it is an absolute symlink, the parent is reset to '/'
// and returned together with the absolute symlink. If the joined result is not a symlink, the joined result is
// returned as the new parent.
func advance(rootBase string, parent string, child string) (string, string, error) {
// Ensure parent is absolute and never empty
parent = filepath.Clean(parent)
if !filepath.IsAbs(parent) {
return "", "", fmt.Errorf("parent path %v must be absolute", parent)
}
if strings.Contains(child, pathSeparator) {
return "", "", fmt.Errorf("child %q must not contain a path separator", child)
}
// Deal with relative path elements like '.', '//' and '..'
// Since parent is absolute, worst case we get '/' as result
path := filepath.Join(parent, child)
if path == rootBase {
// don't evaluate the root itself, since rootBase is allowed to be a symlink
return path, "", nil
}
fi, err := os.Lstat(filepath.Join(rootBase, path))
if err != nil {
return "", "", err
}
if fi.Mode()&fs.ModeSymlink == 0 {
// no symlink, we are done, return the joined result of parent and child
return filepath.Clean(path), "", nil
}
link, err := os.Readlink(filepath.Join(rootBase, path))
if err != nil {
return "", "", err
}
if filepath.IsAbs(link) {
// the link is absolute, let's reset the parent and the discovered link path
return pathRoot, filepath.Clean(link), nil
} else {
// on relative links, don't advance parent and return the link
return parent, filepath.Clean(link), nil
}
}
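// Illustrative walkthrough (not part of the original source), assuming a
// hypothetical layout where /root/a is a relative symlink to "b":
//
//	advance("/root", "/", "a") -> parent "/", link "b": the caller re-queues "b"
//	advance("/root", "/", "b") -> parent "/b", no link: the walk advances
//
// An absolute symlink instead resets the parent to pathRoot before re-queuing.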
// openat helps traverse a path without following symlinks,
// to ensure safe path references on user-owned paths by privileged processes.
func openat(dirfd int, path string) (fd int, err error) {
if err := isSingleElement(path); err != nil {
return -1, err
}
return unix.Openat(dirfd, path, unix.O_NOFOLLOW|unix.O_PATH, 0)
}
func unlinkat(dirfd int, path string, flags int) error {
if err := isSingleElement(path); err != nil {
return err
}
return unix.Unlinkat(dirfd, path, flags)
}
func touchat(dirfd int, path string, mode uint32) (fd int, err error) {
if err := isSingleElement(path); err != nil {
return -1, err
}
return unix.Openat(dirfd, path, unix.O_NOFOLLOW|syscall.O_CREAT|syscall.O_EXCL, mode)
}
func mknodat(dirfd int, path string, mode uint32, dev uint64) (err error) {
if err := isSingleElement(path); err != nil {
return err
}
return unix.Mknodat(dirfd, path, mode, int(dev))
}
func open(path string) (fd int, err error) {
return syscall.Open(path, unix.O_PATH, 0)
}
func path(fd int) string {
return fmt.Sprintf("/proc/self/fd/%d", fd)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"context"
"encoding/json"
"fmt"
admissionv1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/client-go/tools/cache"
backupv1 "kubevirt.io/api/backup/v1alpha1"
"kubevirt.io/client-go/kubecli"
backup "kubevirt.io/kubevirt/pkg/storage/cbt"
webhookutils "kubevirt.io/kubevirt/pkg/util/webhooks"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
// VMBackupAdmitter validates VirtualMachineBackups
type VMBackupAdmitter struct {
Config *virtconfig.ClusterConfig
Client kubecli.KubevirtClient
VMBackupInformer cache.SharedIndexInformer
}
// NewVMBackupAdmitter creates a VMBackupAdmitter
func NewVMBackupAdmitter(config *virtconfig.ClusterConfig, client kubecli.KubevirtClient, vmBackupInformer cache.SharedIndexInformer) *VMBackupAdmitter {
return &VMBackupAdmitter{
Config: config,
Client: client,
VMBackupInformer: vmBackupInformer,
}
}
// Admit validates an AdmissionReview
func (admitter *VMBackupAdmitter) Admit(ctx context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
if ar.Request.Resource.Group != backupv1.SchemeGroupVersion.Group ||
ar.Request.Resource.Resource != "virtualmachinebackups" {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("unexpected resource %+v", ar.Request.Resource))
}
if ar.Request.Operation == admissionv1.Create && !admitter.Config.IncrementalBackupEnabled() {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("IncrementalBackup feature gate not enabled"))
}
// Only need to validate on Create - spec immutability is now enforced by CEL
if ar.Request.Operation != admissionv1.Create {
return &admissionv1.AdmissionResponse{Allowed: true}
}
vmBackup := &backupv1.VirtualMachineBackup{}
if err := json.Unmarshal(ar.Request.Object.Raw, vmBackup); err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
// Validate that only one backup is in progress per source
causes, err := admitter.validateSingleBackup(vmBackup, ar.Request.Namespace)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
return &admissionv1.AdmissionResponse{Allowed: true}
}
func (admitter *VMBackupAdmitter) validateSingleBackup(vmBackup *backupv1.VirtualMachineBackup, namespace string) ([]metav1.StatusCause, error) {
objects, err := admitter.VMBackupInformer.GetIndexer().ByIndex(cache.NamespaceIndex, namespace)
if err != nil {
return nil, err
}
sourceField := k8sfield.NewPath("spec", "source")
for _, obj := range objects {
existingBackup := obj.(*backupv1.VirtualMachineBackup)
if existingBackup.Name == vmBackup.Name {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachineBackup %q already exists", existingBackup.Name),
Field: k8sfield.NewPath("metadata", "name").String(),
}}, nil
}
// Reject if another backup is in progress for the same source
if equality.Semantic.DeepEqual(existingBackup.Spec.Source, vmBackup.Spec.Source) &&
!backup.IsBackupDone(existingBackup.Status) {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachineBackup %q in progress for source", existingBackup.Name),
Field: sourceField.String(),
}}, nil
}
}
return nil, nil
}
// VMBackupTrackerAdmitter validates VirtualMachineBackupTrackers
type VMBackupTrackerAdmitter struct {
Config *virtconfig.ClusterConfig
}
// NewVMBackupTrackerAdmitter creates a VMBackupTrackerAdmitter
func NewVMBackupTrackerAdmitter(config *virtconfig.ClusterConfig) *VMBackupTrackerAdmitter {
return &VMBackupTrackerAdmitter{
Config: config,
}
}
// Admit validates an AdmissionReview for VirtualMachineBackupTracker
func (admitter *VMBackupTrackerAdmitter) Admit(ctx context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
if ar.Request.Resource.Group != backupv1.SchemeGroupVersion.Group ||
ar.Request.Resource.Resource != "virtualmachinebackuptrackers" {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("unexpected resource %+v", ar.Request.Resource))
}
if ar.Request.Operation == admissionv1.Create && !admitter.Config.IncrementalBackupEnabled() {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("IncrementalBackup feature gate not enabled"))
}
return &admissionv1.AdmissionResponse{Allowed: true}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"encoding/json"
"fmt"
"reflect"
admissionv1 "k8s.io/api/admission/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)
func (a Admitter) validateVirtualMachineDataVolumeTemplateNamespace() ([]metav1.StatusCause, error) {
var causes []metav1.StatusCause
if a.ar.Operation == admissionv1.Update || a.ar.Operation == admissionv1.Delete {
oldVM := &v1.VirtualMachine{}
if err := json.Unmarshal(a.ar.OldObject.Raw, oldVM); err != nil {
return []metav1.StatusCause{{
Type: metav1.CauseTypeUnexpectedServerResponse,
Message: "Could not fetch old VM",
}}, nil
}
if equality.Semantic.DeepEqual(oldVM.Spec.DataVolumeTemplates, a.vm.Spec.DataVolumeTemplates) {
return nil, nil
}
}
for idx, dataVolume := range a.vm.Spec.DataVolumeTemplates {
targetNamespace := a.vm.Namespace
if targetNamespace == "" {
targetNamespace = a.ar.Namespace
}
if dataVolume.Namespace != "" && dataVolume.Namespace != targetNamespace {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Embedded DataVolume namespace %s differs from VM namespace %s", dataVolume.Namespace, targetNamespace),
Field: k8sfield.NewPath("spec", "dataVolumeTemplates").Index(idx).String(),
})
continue
}
}
return causes, nil
}
func ValidateDataVolumeTemplate(field *k8sfield.Path, spec *v1.VirtualMachineSpec) (causes []metav1.StatusCause) {
for idx, dataVolume := range spec.DataVolumeTemplates {
cause := validateDataVolume(field.Child("dataVolumeTemplate").Index(idx), dataVolume)
if cause != nil {
causes = append(causes, cause...)
continue
}
dataVolumeRefFound := false
for _, volume := range spec.Template.Spec.Volumes {
if volume.VolumeSource.PersistentVolumeClaim != nil && volume.VolumeSource.PersistentVolumeClaim.ClaimName == dataVolume.Name ||
volume.VolumeSource.DataVolume != nil && volume.VolumeSource.DataVolume.Name == dataVolume.Name {
dataVolumeRefFound = true
break
}
}
if !dataVolumeRefFound {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: fmt.Sprintf("DataVolumeTemplate entry %s must be referenced in the VMI template's 'volumes' list", field.Child("dataVolumeTemplate").Index(idx).String()),
Field: field.Child("dataVolumeTemplate").Index(idx).String(),
})
}
}
return causes
}
func validateDataVolume(field *k8sfield.Path, dataVolume v1.DataVolumeTemplateSpec) []metav1.StatusCause {
if dataVolume.Name == "" {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueRequired,
Message: fmt.Sprintf("'name' field must not be empty for DataVolumeTemplate entry %s.", field.Child("name").String()),
Field: field.Child("name").String(),
}}
}
if dataVolume.Spec.PVC == nil && dataVolume.Spec.Storage == nil {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Missing Data volume PVC or Storage",
Field: field.Child("PVC", "Storage").String(),
}}
}
if dataVolume.Spec.PVC != nil && dataVolume.Spec.Storage != nil {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Duplicate storage definition, both target storage and target pvc defined",
Field: field.Child("PVC", "Storage").String(),
}}
}
var dataSourceRef *corev1.TypedObjectReference
var dataSource *corev1.TypedLocalObjectReference
if dataVolume.Spec.PVC != nil {
dataSourceRef = dataVolume.Spec.PVC.DataSourceRef
dataSource = dataVolume.Spec.PVC.DataSource
} else if dataVolume.Spec.Storage != nil {
dataSourceRef = dataVolume.Spec.Storage.DataSourceRef
dataSource = dataVolume.Spec.Storage.DataSource
}
// dataVolume is externally populated
if (dataSourceRef != nil || dataSource != nil) &&
(dataVolume.Spec.Source != nil || dataVolume.Spec.SourceRef != nil) {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "External population is incompatible with Source and SourceRef",
Field: field.Child("source").String(),
}}
}
if (dataSourceRef == nil && dataSource == nil) &&
(dataVolume.Spec.Source == nil && dataVolume.Spec.SourceRef == nil) {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Data volume should have either Source, SourceRef, or be externally populated",
Field: field.Child("source", "sourceRef").String(),
}}
}
if dataVolume.Spec.Source != nil {
return validateNumberOfSources(field, dataVolume.Spec.Source)
}
return nil
}
func validateNumberOfSources(field *field.Path, source *cdiv1.DataVolumeSource) []metav1.StatusCause {
numberOfSources := 0
s := reflect.ValueOf(source).Elem()
for i := 0; i < s.NumField(); i++ {
if !reflect.ValueOf(s.Field(i).Interface()).IsNil() {
numberOfSources++
}
}
if numberOfSources == 0 {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Missing dataVolume valid source",
Field: field.Child("source").String(),
}}
}
if numberOfSources > 1 {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Multiple dataVolume sources",
Field: field.Child("source").String(),
}}
}
return nil
}
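// exampleNumberOfSources is an illustrative sketch (not part of the original
// source): a DataVolumeSource must set exactly one of its source fields.
func exampleNumberOfSources() []metav1.StatusCause {
src := &cdiv1.DataVolumeSource{Blank: &cdiv1.DataVolumeBlankImage{}}
// Exactly one source set returns nil; zero or more than one yields a cause.
return validateNumberOfSources(k8sfield.NewPath("spec", "source"), src)
}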
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"fmt"
"path/filepath"
"regexp"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
hwutil "kubevirt.io/kubevirt/pkg/util/hardware"
)
const (
maxStrLen = 256
// Should be a power of 2
minCustomBlockSize = 512
maxCustomBlockSize = 2097152 // 2 MB
)
var isValidExpression = regexp.MustCompile(`^[A-Za-z0-9_.+-]+$`).MatchString
func ValidateDisks(field *k8sfield.Path, disks []v1.Disk) []metav1.StatusCause {
var causes []metav1.StatusCause
for idx, disk := range disks {
causes = append(causes, validateDiskName(field, idx, disks)...)
causes = append(causes, validateDeviceTarget(field, idx, disk)...)
causes = append(causes, validatePciAddress(field, idx, disk)...)
causes = append(causes, validateBootOrderValue(field, idx, disk)...)
causes = append(causes, validateBusSupport(field, idx, disk)...)
causes = append(causes, validateSerialNumValue(field, idx, disk)...)
causes = append(causes, validateSerialNumLength(field, idx, disk)...)
causes = append(causes, validateCacheMode(field, idx, disk)...)
causes = append(causes, validateIOMode(field, idx, disk)...)
causes = append(causes, validateErrorPolicy(field, idx, disk)...)
// Verify that the disk name is a valid container name, since a disk name
// can become a container name, and an invalid one will fail to schedule.
causes = append(causes, validateDiskNameAsContainerName(field, idx, disk)...)
causes = append(causes, validateBlockSize(field, idx, disk)...)
}
return causes
}
func ValidateContainerDisks(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
for idx, volume := range spec.Volumes {
if volume.ContainerDisk == nil || volume.ContainerDisk.Path == "" {
continue
}
causes = append(causes, ValidatePath(field.Child("volumes").Index(idx).Child("containerDisk"), volume.ContainerDisk.Path)...)
}
return causes
}
func validateDiskName(field *k8sfield.Path, idx int, disks []v1.Disk) []metav1.StatusCause {
var causes []metav1.StatusCause
for otherIdx, disk := range disks {
if otherIdx < idx && disk.Name == disks[idx].Name {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s and %s must not have the same Name.", field.Index(idx).String(), field.Index(otherIdx).String()),
Field: field.Index(idx).Child("name").String(),
})
}
}
return causes
}
func validateDeviceTarget(field *k8sfield.Path, idx int, disk v1.Disk) []metav1.StatusCause {
var causes []metav1.StatusCause
deviceTargetSetCount := 0
if disk.Disk != nil {
deviceTargetSetCount++
}
if disk.LUN != nil {
deviceTargetSetCount++
}
if disk.CDRom != nil {
deviceTargetSetCount++
}
// NOTE: not setting a device target is okay. We default to Disk.
// However, only a single device target is allowed to be set at a time.
if deviceTargetSetCount > 1 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s can only have a single target type defined", field.Index(idx).String()),
Field: field.Index(idx).String(),
})
}
return causes
}
func validatePciAddress(field *k8sfield.Path, idx int, disk v1.Disk) []metav1.StatusCause {
var causes []metav1.StatusCause
if disk.Disk == nil || disk.Disk.PciAddress == "" {
return causes
}
if disk.Disk.Bus != v1.DiskBusVirtio {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("disk %s - setting a PCI address is only possible with bus type virtio.", field.Child("domain", "devices", "disks", "disk").Index(idx).Child("name").String()),
Field: field.Child("domain", "devices", "disks", "disk").Index(idx).Child("pciAddress").String(),
})
}
if _, err := hwutil.ParsePciAddress(disk.Disk.PciAddress); err != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("disk %s has malformed PCI address (%s).", field.Child("domain", "devices", "disks", "disk").Index(idx).Child("name").String(), disk.Disk.PciAddress),
Field: field.Child("domain", "devices", "disks", "disk").Index(idx).Child("pciAddress").String(),
})
}
return causes
}
func validateBootOrderValue(field *k8sfield.Path, idx int, disk v1.Disk) []metav1.StatusCause {
var causes []metav1.StatusCause
if disk.BootOrder != nil && *disk.BootOrder < 1 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must have a boot order > 0, if supplied", field.Index(idx).String()),
Field: field.Index(idx).Child("bootOrder").String(),
})
}
return causes
}
func getDiskBus(disk v1.Disk) v1.DiskBus {
switch {
case disk.Disk != nil:
return disk.Disk.Bus
case disk.LUN != nil:
return disk.LUN.Bus
case disk.CDRom != nil:
return disk.CDRom.Bus
default:
return ""
}
}
func getDiskType(disk v1.Disk) string {
switch {
case disk.Disk != nil:
return "disk"
case disk.LUN != nil:
return "lun"
case disk.CDRom != nil:
return "cdrom"
default:
return ""
}
}
func validateBusSupport(field *k8sfield.Path, idx int, disk v1.Disk) []metav1.StatusCause {
var causes []metav1.StatusCause
bus := getDiskBus(disk)
diskType := getDiskType(disk)
if bus == "" {
return causes
}
switch bus {
case "ide":
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "IDE bus is not supported",
Field: field.Index(idx).Child(diskType, "bus").String(),
})
case v1.DiskBusVirtio:
// special case. virtio is incompatible with CD-ROM for q35 machine types
if diskType == "cdrom" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Bus type %s is invalid for CD-ROM device", bus),
Field: field.Index(idx).Child("cdrom", "bus").String(),
})
}
case v1.DiskBusSATA:
// sata disks (in contrast to sata cdroms) don't support readOnly
if disk.Disk != nil && disk.Disk.ReadOnly {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s hard-disks do not support read-only.", bus),
Field: field.Index(idx).Child("disk", "bus").String(),
})
}
case v1.DiskBusSCSI, v1.DiskBusUSB:
break
default:
supportedBuses := []v1.DiskBus{v1.DiskBusVirtio, v1.DiskBusSCSI, v1.DiskBusSATA, v1.DiskBusUSB}
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s is set with an unrecognized bus %s, must be one of: %v", field.Index(idx).String(), bus, supportedBuses),
Field: field.Index(idx).Child(diskType, "bus").String(),
})
}
// Reject defining DedicatedIOThread to a disk without VirtIO bus since this configuration
// is not supported in libvirt.
if disk.DedicatedIOThread != nil && *disk.DedicatedIOThread && bus != v1.DiskBusVirtio {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: fmt.Sprintf("IOThreads are not supported for disks on a %s bus", bus),
Field: field.Child("domain", "devices", "disks").Index(idx).String(),
})
}
return causes
}
func validateSerialNumValue(field *k8sfield.Path, idx int, disk v1.Disk) []metav1.StatusCause {
var causes []metav1.StatusCause
if disk.Serial != "" && !isValidExpression(disk.Serial) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must be made up of the following characters [A-Za-z0-9_.+-], if specified", field.Index(idx).String()),
Field: field.Index(idx).Child("serial").String(),
})
}
return causes
}
func validateSerialNumLength(field *k8sfield.Path, idx int, disk v1.Disk) []metav1.StatusCause {
var causes []metav1.StatusCause
if disk.Serial != "" && len([]rune(disk.Serial)) > maxStrLen {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must be less than or equal to %d in length, if specified", field.Index(idx).String(), maxStrLen),
Field: field.Index(idx).Child("serial").String(),
})
}
return causes
}
func validateCacheMode(field *k8sfield.Path, idx int, disk v1.Disk) []metav1.StatusCause {
var causes []metav1.StatusCause
if disk.Cache != "" && disk.Cache != v1.CacheNone && disk.Cache != v1.CacheWriteThrough && disk.Cache != v1.CacheWriteBack {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s has invalid value %s", field.Index(idx).Child("cache").String(), disk.Cache),
Field: field.Index(idx).Child("cache").String(),
})
}
return causes
}
func validateIOMode(field *k8sfield.Path, idx int, disk v1.Disk) []metav1.StatusCause {
var causes []metav1.StatusCause
if disk.IO != "" && disk.IO != v1.IONative && disk.IO != v1.IOThreads {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: fmt.Sprintf("Disk IO mode for %s is not supported. Supported modes are: native, threads.", field),
Field: field.Child("domain", "devices", "disks").Index(idx).Child("io").String(),
})
}
return causes
}
func validateErrorPolicy(field *k8sfield.Path, idx int, disk v1.Disk) []metav1.StatusCause {
var causes []metav1.StatusCause
if disk.ErrorPolicy != nil && *disk.ErrorPolicy != v1.DiskErrorPolicyStop && *disk.ErrorPolicy != v1.DiskErrorPolicyIgnore && *disk.ErrorPolicy != v1.DiskErrorPolicyReport && *disk.ErrorPolicy != v1.DiskErrorPolicyEnospace {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s has invalid value \"%s\"", field.Index(idx).Child("errorPolicy").String(), *disk.ErrorPolicy),
Field: field.Index(idx).Child("errorPolicy").String(),
})
}
return causes
}
func validateDiskNameAsContainerName(field *k8sfield.Path, idx int, disk v1.Disk) []metav1.StatusCause {
var causes []metav1.StatusCause
for _, err := range validation.IsDNS1123Label(disk.Name) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: err,
Field: field.Child("domain", "devices", "disks").Index(idx).Child("name").String(),
})
}
return causes
}
func validateCustomBlockSize(field *k8sfield.Path, idx int, blockType string, size uint) []metav1.StatusCause {
var causes []metav1.StatusCause
switch {
case size < minCustomBlockSize:
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Provided size of %d is less than the supported minimum size of %d", size, minCustomBlockSize),
Field: field.Index(idx).Child("blockSize").Child("custom").Child(blockType).String(),
})
case size > maxCustomBlockSize:
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Provided size of %d is greater than the supported maximum size of %d", size, maxCustomBlockSize),
Field: field.Index(idx).Child("blockSize").Child("custom").Child(blockType).String(),
})
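// A size is a power of two exactly when it has a single bit set, i.e. size&(size-1) == 0.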
case size&(size-1) != 0:
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Provided size of %d is not a power of 2", size),
Field: field.Index(idx).Child("blockSize").Child("custom").Child(blockType).String(),
})
}
return causes
}
func validateBlockSize(field *k8sfield.Path, idx int, disk v1.Disk) []metav1.StatusCause {
var causes []metav1.StatusCause
if disk.BlockSize == nil || disk.BlockSize.Custom == nil {
return causes
}
if disk.BlockSize.MatchVolume != nil && (disk.BlockSize.MatchVolume.Enabled == nil || *disk.BlockSize.MatchVolume.Enabled) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Block size matching can't be enabled together with a custom value",
Field: field.Index(idx).Child("blockSize").String(),
})
return causes
}
customSize := disk.BlockSize.Custom
if customSize.Logical > customSize.Physical {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Logical size %d must be the same or less than the physical size of %d", customSize.Logical, customSize.Physical),
Field: field.Index(idx).Child("blockSize").Child("custom").Child("logical").String(),
})
} else if getDiskBus(disk) == v1.DiskBusSATA && customSize.Logical != minCustomBlockSize {
// For IDE and SATA disks in QEMU, the emulated controllers only support a logical size of 512 bytes.
// https://gitlab.com/qemu-project/qemu/-/blob/f0007b7f03e2d7fc33e71c3a582f2364c51a226b/hw/ide/ide-dev.c#L105
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Logical size %d must be %d for SATA devices", customSize.Logical, minCustomBlockSize),
Field: field.Index(idx).Child("blockSize").Child("custom").Child("logical").String(),
})
} else if customSize.DiscardGranularity != nil && customSize.Logical != 0 && *customSize.DiscardGranularity%customSize.Logical != 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Discard granularity %d must be multiples of logical size %d", *customSize.DiscardGranularity, customSize.Logical),
Field: field.Index(idx).Child("blockSize").Child("custom").Child("discardGranularity").String(),
})
} else {
causes = append(causes, validateCustomBlockSize(field, idx, "logical", customSize.Logical)...)
causes = append(causes, validateCustomBlockSize(field, idx, "physical", customSize.Physical)...)
}
return causes
}
func ValidatePath(field *k8sfield.Path, path string) []metav1.StatusCause {
var causes []metav1.StatusCause
if path == "/" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must not point to root",
field.String(),
),
Field: field.String(),
})
return causes
}
cleanedPath := filepath.Join("/", path)
providedPath := strings.TrimSuffix(path, "/") // Join trims suffix slashes
if cleanedPath != providedPath {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must be an absolute path to a file without relative components",
field.String(),
),
Field: field.String(),
})
}
return causes
}
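// exampleValidatePath is an illustrative sketch (not part of the original
// source) of the path rules enforced above.
func exampleValidatePath() {
base := k8sfield.NewPath("spec", "volumes")
_ = ValidatePath(base, "/disk/image.qcow2") // ok: absolute and clean
_ = ValidatePath(base, "/")                 // rejected: points to root
_ = ValidatePath(base, "/disk/../image")    // rejected: relative component
}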
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"fmt"
admissionv1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
webhookutils "kubevirt.io/kubevirt/pkg/util/webhooks"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
// AdmitHotplugStorage compares the old and new volumes and disks, and ensures that they match and are valid.
func AdmitHotplugStorage(newVolumes, oldVolumes []v1.Volume, newDisks, oldDisks []v1.Disk, volumeStatuses []v1.VolumeStatus, newVMI *v1.VirtualMachineInstance, config *virtconfig.ClusterConfig) *admissionv1.AdmissionResponse {
if err := validateExpectedDisksAndFilesystems(newVolumes, newDisks, newVMI.Spec.Domain.Devices.Filesystems, config); err != nil {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: err.Error(),
},
})
}
newHotplugVolumeMap := getHotplugVolumes(newVolumes, volumeStatuses)
newPermanentVolumeMap := getPermanentVolumes(newVolumes, volumeStatuses)
oldHotplugVolumeMap := getHotplugVolumes(oldVolumes, volumeStatuses)
oldPermanentVolumeMap := getPermanentVolumes(oldVolumes, volumeStatuses)
migratedVolumeMap := getMigratedVolumeMaps(newVMI.Status.MigratedVolumes)
newDiskMap := getDiskMap(newDisks)
oldDiskMap := getDiskMap(oldDisks)
permanentAr := verifyPermanentVolumes(newPermanentVolumeMap, oldPermanentVolumeMap, newDiskMap, oldDiskMap, migratedVolumeMap)
if permanentAr != nil {
return permanentAr
}
hotplugAr := verifyHotplugVolumes(newHotplugVolumeMap, oldHotplugVolumeMap, newDiskMap, oldDiskMap, migratedVolumeMap)
if hotplugAr != nil {
return hotplugAr
}
return nil
}
func ValidateHotplugDiskConfiguration(disk *v1.Disk, name, messagePrefix, field string) []metav1.StatusCause {
if disk == nil {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s for [%s] requires the disk field to be set.", messagePrefix, name),
Field: field,
}}
}
bus := getDiskBus(*disk)
switch {
case disk.DiskDevice.Disk != nil:
if bus != v1.DiskBusSCSI && bus != v1.DiskBusVirtio {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s for disk [%s] requires bus to be 'scsi' or 'virtio'. [%s] is not permitted.", messagePrefix, name, bus),
Field: field,
}}
}
case disk.DiskDevice.LUN != nil:
if bus != v1.DiskBusSCSI {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s for LUN [%s] requires bus to be 'scsi'. [%s] is not permitted.", messagePrefix, name, bus),
Field: field,
}}
}
default:
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s for [%s] requires diskDevice of type 'disk' or 'lun' to be used.", messagePrefix, name),
Field: field,
}}
}
if disk.DedicatedIOThread != nil && *disk.DedicatedIOThread && bus != v1.DiskBusVirtio {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s for [%s] requires virtio bus for IOThreads.", messagePrefix, name),
Field: field,
}}
}
// Validate boot order
if disk.BootOrder != nil {
order := *disk.BootOrder
if order < 1 {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("spec.domain.devices.disks[1] must have a boot order > 0, if supplied"),
Field: fmt.Sprintf("spec.domain.devices.disks[1].bootOrder"),
}}
}
}
return nil
}
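// exampleHotplugDiskConfiguration is an illustrative sketch (not part of the
// original source): a hotplugged disk device must use the scsi or virtio bus.
func exampleHotplugDiskConfiguration() []metav1.StatusCause {
disk := &v1.Disk{
Name:       "hotplug-disk",
DiskDevice: v1.DiskDevice{Disk: &v1.DiskTarget{Bus: v1.DiskBusSCSI}},
}
// Returns no causes: a disk device on the scsi bus with no boot order set.
return ValidateHotplugDiskConfiguration(disk, "hotplug-disk", "Hotplug configuration", "spec.domain.devices.disks")
}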
func validateExpectedDisksAndFilesystems(volumes []v1.Volume, disks []v1.Disk, filesystems []v1.Filesystem, config *virtconfig.ClusterConfig) error {
names := make(map[string]struct{})
for _, volume := range volumes {
if volume.MemoryDump == nil {
names[volume.Name] = struct{}{}
}
}
requiredVolumes := len(filesystems)
for _, disk := range disks {
_, ok := names[disk.Name]
// it's okay for a CDRom to not be mapped to a volume
if !ok && disk.CDRom != nil && config.DeclarativeHotplugVolumesEnabled() {
continue
}
requiredVolumes++
}
// Make sure volume is not mapped to multiple disks/filesystems or volume mapped to nothing
if requiredVolumes != len(names) {
return fmt.Errorf("mismatch between volumes declared (%d) and required (%d)", len(names), requiredVolumes)
}
return nil
}
func verifyHotplugVolumes(newHotplugVolumeMap, oldHotplugVolumeMap map[string]v1.Volume, newDisks, oldDisks map[string]v1.Disk,
migratedVols map[string]bool) *admissionv1.AdmissionResponse {
for k, v := range newHotplugVolumeMap {
if _, ok := oldHotplugVolumeMap[k]; ok {
_, okMigVol := migratedVols[k]
// New and old have same volume, ensure they are the same
if !equality.Semantic.DeepEqual(v, oldHotplugVolumeMap[k]) && !okMigVol {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("hotplug volume %s, changed", k),
},
})
}
if v.MemoryDump == nil {
if _, ok := newDisks[k]; !ok {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("volume %s doesn't have a matching disk", k),
},
})
}
if !equality.Semantic.DeepEqual(newDisks[k], oldDisks[k]) {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("hotplug disk %s, changed", k),
},
})
}
}
} else {
// This is a new volume, ensure that the volume is either DV, PVC or memoryDumpVolume
if v.DataVolume == nil && v.PersistentVolumeClaim == nil && v.MemoryDump == nil {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("volume %s is not a PVC or DataVolume", k),
},
})
}
if v.MemoryDump == nil {
// Also ensure the matching new disk exists and has a valid bus
if _, ok := newDisks[k]; !ok {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("disk %s does not exist", k),
},
})
}
disk := newDisks[k]
if _, ok := oldDisks[k]; !ok {
causes := ValidateHotplugDiskConfiguration(&disk, k, "Hotplug configuration", "")
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
}
}
}
}
return nil
}
func isMigratedVolume(newVol, oldVol *v1.Volume, migratedVolumeMap map[string]bool) bool {
if newVol.Name != oldVol.Name {
return false
}
_, ok := migratedVolumeMap[newVol.Name]
return ok
}
func verifyPermanentVolumes(newPermanentVolumeMap, oldPermanentVolumeMap map[string]v1.Volume, newDisks, oldDisks map[string]v1.Disk, migratedVolumeMap map[string]bool) *admissionv1.AdmissionResponse {
if len(newPermanentVolumeMap) != len(oldPermanentVolumeMap) {
// Removed one of the permanent volumes, reject admission.
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Number of permanent volumes has changed",
},
})
}
// Ensure we didn't modify any permanent volumes
for k, v := range newPermanentVolumeMap {
// We know at this point that the new and old permanent volume maps have the same count.
if _, ok := oldPermanentVolumeMap[k]; !ok {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("permanent volume %s, not found", k),
},
})
}
oldVol := oldPermanentVolumeMap[k]
if isMigratedVolume(&v, &oldVol, migratedVolumeMap) {
continue
}
if !equality.Semantic.DeepEqual(v, oldVol) {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("permanent volume %s, changed", k),
},
})
}
if !equality.Semantic.DeepEqual(newDisks[k], oldDisks[k]) {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("permanent disk %s, changed", k),
},
})
}
}
return nil
}
func getDiskMap(disks []v1.Disk) map[string]v1.Disk {
newDiskMap := make(map[string]v1.Disk, 0)
for _, disk := range disks {
if disk.Name != "" {
newDiskMap[disk.Name] = disk
}
}
return newDiskMap
}
func getHotplugVolumes(volumes []v1.Volume, volumeStatuses []v1.VolumeStatus) map[string]v1.Volume {
permanentVolumesFromStatus := make(map[string]v1.Volume)
for _, volume := range volumeStatuses {
if volume.HotplugVolume == nil {
permanentVolumesFromStatus[volume.Name] = v1.Volume{}
}
}
hotplugVolumes := make(map[string]v1.Volume)
for _, volume := range volumes {
if _, ok := permanentVolumesFromStatus[volume.Name]; !ok {
hotplugVolumes[volume.Name] = volume
}
}
return hotplugVolumes
}
func getPermanentVolumes(volumes []v1.Volume, volumeStatuses []v1.VolumeStatus) map[string]v1.Volume {
permanentVolumesFromStatus := make(map[string]v1.Volume, 0)
for _, volume := range volumeStatuses {
if volume.HotplugVolume == nil {
permanentVolumesFromStatus[volume.Name] = v1.Volume{}
}
}
permanentVolumes := make(map[string]v1.Volume, 0)
for _, volume := range volumes {
if _, ok := permanentVolumesFromStatus[volume.Name]; ok {
permanentVolumes[volume.Name] = volume
}
}
return permanentVolumes
}
func getMigratedVolumeMaps(migratedDisks []v1.StorageMigratedVolumeInfo) map[string]bool {
volumes := make(map[string]bool)
for _, v := range migratedDisks {
volumes[v.VolumeName] = true
}
return volumes
}
func validateUtilityVolumes(field *field.Path, spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) []metav1.StatusCause {
var causes []metav1.StatusCause
if len(spec.UtilityVolumes) > 0 && !config.UtilityVolumesEnabled() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "UtilityVolumes feature gate is not enabled",
Field: field.Child("utilityVolumes").String(),
})
return causes
}
volumeNameMap := make(map[string]int)
for idx, volume := range spec.Volumes {
volumeNameMap[volume.Name] = idx
}
utilityVolumeNameMap := make(map[string]int)
for idx, utilityVolume := range spec.UtilityVolumes {
if utilityVolume.Name == "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: "UtilityVolume 'name' must be set",
Field: field.Child("utilityVolumes").Index(idx).Child("name").String(),
})
}
if otherIdx, exists := utilityVolumeNameMap[utilityVolume.Name]; exists {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueDuplicate,
Message: fmt.Sprintf("%s and %s must not have the same Name.", field.Child("utilityVolumes").Index(idx).String(), field.Child("utilityVolumes").Index(otherIdx).String()),
Field: field.Child("utilityVolumes").Index(idx).Child("name").String(),
})
} else {
utilityVolumeNameMap[utilityVolume.Name] = idx
}
// Check for conflicts with regular volume names
if regularVolumeIdx, exists := volumeNameMap[utilityVolume.Name]; exists {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueDuplicate,
Message: fmt.Sprintf("%s conflicts with %s. UtilityVolume names must be unique across both volumes and utilityVolumes.", field.Child("utilityVolumes").Index(idx).String(), field.Child("volumes").Index(regularVolumeIdx).String()),
Field: field.Child("utilityVolumes").Index(idx).Child("name").String(),
})
}
// Validate PVC claim name is set
if utilityVolume.PersistentVolumeClaimVolumeSource.ClaimName == "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: "UtilityVolume PVC 'claimName' must be set",
Field: field.Child("utilityVolumes").Index(idx).Child("claimName").String(),
})
}
}
return causes
}
func AdmitUtilityVolumes(newSpec, oldSpec *v1.VirtualMachineInstanceSpec, volumeStatuses []v1.VolumeStatus, config *virtconfig.ClusterConfig) *admissionv1.AdmissionResponse {
if causes := validateUtilityVolumes(field.NewPath("spec"), newSpec, config); len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
if oldSpec == nil {
return nil
}
// Ensure utility volumes are never treated as permanent volumes
// They should always be hotplug volumes
permanentVolumesFromStatus := make(map[string]bool)
for _, volumeStatus := range volumeStatuses {
if volumeStatus.HotplugVolume == nil {
permanentVolumesFromStatus[volumeStatus.Name] = true
}
}
// Check that no utility volumes are marked as permanent
for _, utilityVolume := range newSpec.UtilityVolumes {
if permanentVolumesFromStatus[utilityVolume.Name] {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("utility volume %s cannot be a permanent volume, utility volumes must always be hotplug", utilityVolume.Name),
},
})
}
}
newUtilityVolumeMap := getUtilityVolumeMap(newSpec.UtilityVolumes)
oldUtilityVolumeMap := getUtilityVolumeMap(oldSpec.UtilityVolumes)
for name, newVolume := range newUtilityVolumeMap {
if oldVolume, exists := oldUtilityVolumeMap[name]; exists {
// Existing utility volume, ensure it hasn't changed
if !equality.Semantic.DeepEqual(newVolume, oldVolume) {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("utility volume %s has changed", name),
},
})
}
}
}
return nil
}
func getUtilityVolumeMap(utilityVolumes []v1.UtilityVolume) map[string]v1.UtilityVolume {
utilityVolumeMap := make(map[string]v1.UtilityVolume)
for _, volume := range utilityVolumes {
utilityVolumeMap[volume.Name] = volume
}
return utilityVolumeMap
}
func ValidateUtilityVolumesNotPresentOnCreation(field *field.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.UtilityVolumes != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "cannot create VMI with utility volumes in spec, utility volumes can only be added via hotplug",
Field: field.Child("utilityVolumes").String(),
})
}
return causes
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"context"
admissionv1 "k8s.io/api/admission/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
type Admitter struct {
virtClient kubecli.KubevirtClient
ctx context.Context
ar *admissionv1.AdmissionRequest
vm *v1.VirtualMachine
clusterConfig *virtconfig.ClusterConfig
}
func NewAdmitter(virtClient kubecli.KubevirtClient, ctx context.Context, ar *admissionv1.AdmissionRequest, vm *v1.VirtualMachine, clusterConfig *virtconfig.ClusterConfig) *Admitter {
return &Admitter{
virtClient: virtClient,
ctx: ctx,
ar: ar,
vm: vm,
clusterConfig: clusterConfig,
}
}
func (a Admitter) AdmitStatus() []metav1.StatusCause {
causes := a.validateSnapshotStatus()
if len(causes) > 0 {
return causes
}
causes = a.validateRestoreStatus()
if len(causes) > 0 {
return causes
}
return causes
}
func (a Admitter) Admit() ([]metav1.StatusCause, error) {
causes, err := a.validateVirtualMachineDataVolumeTemplateNamespace()
if err != nil || len(causes) > 0 {
return causes, err
}
causes = a.AdmitStatus()
if len(causes) > 0 {
return causes, nil
}
return causes, nil
}
func Admit(virtClient kubecli.KubevirtClient, ctx context.Context, ar *admissionv1.AdmissionRequest, vm *v1.VirtualMachine, clusterConfig *virtconfig.ClusterConfig) ([]metav1.StatusCause, error) {
storageAdmitter := NewAdmitter(virtClient, ctx, ar, vm, clusterConfig)
return storageAdmitter.Admit()
}
func AdmitStatus(virtClient kubecli.KubevirtClient, ctx context.Context, ar *admissionv1.AdmissionRequest, vm *v1.VirtualMachine, clusterConfig *virtconfig.ClusterConfig) []metav1.StatusCause {
storageAdmitter := NewAdmitter(virtClient, ctx, ar, vm, clusterConfig)
return storageAdmitter.AdmitStatus()
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"encoding/json"
"fmt"
admissionv1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
)
func (a *Admitter) validateRestoreStatus() []metav1.StatusCause {
if a.ar.Operation != admissionv1.Update || a.vm.Status.RestoreInProgress == nil {
return nil
}
oldVM := &v1.VirtualMachine{}
if err := json.Unmarshal(a.ar.OldObject.Raw, oldVM); err != nil {
return []metav1.StatusCause{{
Type: metav1.CauseTypeUnexpectedServerResponse,
Message: "Could not fetch old vm",
}}
}
if !equality.Semantic.DeepEqual(oldVM.Spec, a.vm.Spec) {
oldStrategy, _ := oldVM.RunStrategy()
newStrategy, _ := a.vm.RunStrategy()
if newStrategy != oldStrategy {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: fmt.Sprintf("Cannot update VM runStrategy until restore %q completes", *a.vm.Status.RestoreInProgress),
Field: k8sfield.NewPath("spec").String(),
}}
}
}
return nil
}
func (a *Admitter) validateSnapshotStatus() []metav1.StatusCause {
if a.ar.Operation != admissionv1.Update || a.vm.Status.SnapshotInProgress == nil {
return nil
}
oldVM := &v1.VirtualMachine{}
if err := json.Unmarshal(a.ar.OldObject.Raw, oldVM); err != nil {
return []metav1.StatusCause{{
Type: metav1.CauseTypeUnexpectedServerResponse,
Message: "Could not fetch old vm",
}}
}
if !compareVolumes(oldVM.Spec.Template.Spec.Volumes, a.vm.Spec.Template.Spec.Volumes) {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: fmt.Sprintf("Cannot update vm disks or volumes until snapshot %q completes", *a.vm.Status.SnapshotInProgress),
Field: k8sfield.NewPath("spec").String(),
}}
}
if !compareRunningSpec(&oldVM.Spec, &a.vm.Spec) {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: fmt.Sprintf("Cannot update vm running state until snapshot %q completes", *a.vm.Status.SnapshotInProgress),
Field: k8sfield.NewPath("spec").String(),
}}
}
return nil
}
func compareVolumes(old, new []v1.Volume) bool {
if len(old) != len(new) {
return false
}
for i, volume := range old {
if !equality.Semantic.DeepEqual(volume, new[i]) {
return false
}
}
return true
}
func compareRunningSpec(old, new *v1.VirtualMachineSpec) bool {
if old == nil || new == nil {
// This should never happen, but just in case, return false.
return false
}
// It's impossible to get here while both Running and RunStrategy are nil.
if old.Running != nil && new.Running != nil {
return *old.Running == *new.Running
}
if old.RunStrategy != nil && new.RunStrategy != nil {
return *old.RunStrategy == *new.RunStrategy
}
return false
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"context"
"encoding/json"
"fmt"
admissionv1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
virt "kubevirt.io/api/core"
exportv1 "kubevirt.io/api/export/v1beta1"
"kubevirt.io/api/snapshot"
webhookutils "kubevirt.io/kubevirt/pkg/util/webhooks"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
const (
pvc = "PersistentVolumeClaim"
vmSnapshotKind = "VirtualMachineSnapshot"
vmKind = "VirtualMachine"
)
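// These are the accepted VirtualMachineExport source kinds. Each kind expects
// a different apiGroup: blank for PVCs, snapshot.GroupName for snapshots and
// virt.GroupName for VMs, as enforced by the validate*ApiGroup helpers below.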
// VMExportAdmitter validates VirtualMachineExports
type VMExportAdmitter struct {
Config *virtconfig.ClusterConfig
}
// NewVMExportAdmitter creates a VMExportAdmitter
func NewVMExportAdmitter(config *virtconfig.ClusterConfig) *VMExportAdmitter {
return &VMExportAdmitter{
Config: config,
}
}
// Admit validates an AdmissionReview
func (admitter *VMExportAdmitter) Admit(_ context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
if ar.Request.Resource.Group != exportv1.SchemeGroupVersion.Group ||
ar.Request.Resource.Resource != "virtualmachineexports" {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("unexpected resource %+v", ar.Request.Resource))
}
if ar.Request.Operation == admissionv1.Create && !admitter.Config.VMExportEnabled() {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("vm export feature gate not enabled"))
}
vmExport := &exportv1.VirtualMachineExport{}
// TODO ideally use UniversalDeserializer here
err := json.Unmarshal(ar.Request.Object.Raw, vmExport)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
var causes []metav1.StatusCause
switch ar.Request.Operation {
case admissionv1.Create:
sourceField := k8sfield.NewPath("spec", "source")
switch vmExport.Spec.Source.Kind {
case pvc:
causes = append(causes, admitter.validatePVCName(sourceField.Child("name"), vmExport.Spec.Source.Name)...)
causes = append(causes, admitter.validatePVCApiGroup(sourceField.Child("APIGroup"), vmExport.Spec.Source.APIGroup)...)
case vmSnapshotKind:
causes = append(causes, admitter.validateVMSnapshotName(sourceField.Child("name"), vmExport.Spec.Source.Name)...)
causes = append(causes, admitter.validateVMSnapshotApiGroup(sourceField.Child("APIGroup"), vmExport.Spec.Source.APIGroup)...)
case vmKind:
causes = append(causes, admitter.validateVMName(sourceField.Child("name"), vmExport.Spec.Source.Name)...)
causes = append(causes, admitter.validateVMApiGroup(sourceField.Child("APIGroup"), vmExport.Spec.Source.APIGroup)...)
default:
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "invalid kind",
Field: sourceField.Child("kind").String(),
},
}
}
case admissionv1.Update:
prevObj := &exportv1.VirtualMachineExport{}
err = json.Unmarshal(ar.Request.OldObject.Raw, prevObj)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
if !equality.Semantic.DeepEqual(prevObj.Spec, vmExport.Spec) {
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "spec in immutable after creation",
Field: k8sfield.NewPath("spec").String(),
},
}
}
default:
return webhookutils.ToAdmissionResponseError(fmt.Errorf("unexpected operation %s", ar.Request.Operation))
}
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
reviewResponse := admissionv1.AdmissionResponse{
Allowed: true,
}
return &reviewResponse
}
func (admitter *VMExportAdmitter) validatePVCName(field *k8sfield.Path, name string) []metav1.StatusCause {
if name == "" {
return []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "PVC name must not be empty",
Field: field.String(),
},
}
}
return []metav1.StatusCause{}
}
func (admitter *VMExportAdmitter) validatePVCApiGroup(field *k8sfield.Path, apigroup *string) []metav1.StatusCause {
if apigroup != nil && *apigroup != "" {
return []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "PVC API group must be missing or blank",
Field: field.String(),
},
}
}
return []metav1.StatusCause{}
}
func (admitter *VMExportAdmitter) validateVMSnapshotName(field *k8sfield.Path, name string) []metav1.StatusCause {
if name == "" {
return []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "VMSnapshot name must not be empty",
Field: field.String(),
},
}
}
return []metav1.StatusCause{}
}
func (admitter *VMExportAdmitter) validateVMSnapshotApiGroup(field *k8sfield.Path, apigroup *string) []metav1.StatusCause {
if apigroup == nil || *apigroup != snapshot.GroupName {
return []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "VMSnapshot API group must be " + snapshot.GroupName,
Field: field.String(),
},
}
}
return []metav1.StatusCause{}
}
func (admitter *VMExportAdmitter) validateVMName(field *k8sfield.Path, name string) []metav1.StatusCause {
if name == "" {
return []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Virtual Machine name must not be empty",
Field: field.String(),
},
}
}
return []metav1.StatusCause{}
}
func (admitter *VMExportAdmitter) validateVMApiGroup(field *k8sfield.Path, apigroup *string) []metav1.StatusCause {
if apigroup == nil || *apigroup != virt.GroupName {
return []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "VM API group must be " + virt.GroupName,
Field: field.String(),
},
}
}
return []metav1.StatusCause{}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"context"
"encoding/json"
"fmt"
"strings"
admissionv1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/client-go/tools/cache"
"kubevirt.io/api/core"
snapshotv1 "kubevirt.io/api/snapshot/v1beta1"
"kubevirt.io/client-go/kubecli"
backendstorage "kubevirt.io/kubevirt/pkg/storage/backend-storage"
webhookutils "kubevirt.io/kubevirt/pkg/util/webhooks"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
// VMRestoreAdmitter validates VirtualMachineRestores
type VMRestoreAdmitter struct {
Config *virtconfig.ClusterConfig
Client kubecli.KubevirtClient
VMRestoreInformer cache.SharedIndexInformer
}
// NewVMRestoreAdmitter creates a VMRestoreAdmitter
func NewVMRestoreAdmitter(config *virtconfig.ClusterConfig, client kubecli.KubevirtClient, vmRestoreInformer cache.SharedIndexInformer) *VMRestoreAdmitter {
return &VMRestoreAdmitter{
Config: config,
Client: client,
VMRestoreInformer: vmRestoreInformer,
}
}
// Admit validates an AdmissionReview
func (admitter *VMRestoreAdmitter) Admit(ctx context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
if ar.Request.Resource.Group != snapshotv1.SchemeGroupVersion.Group ||
ar.Request.Resource.Resource != "virtualmachinerestores" {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("unexpected resource %+v", ar.Request.Resource))
}
if ar.Request.Operation == admissionv1.Create && !admitter.Config.SnapshotEnabled() {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("Snapshot/Restore feature gate not enabled"))
}
vmRestore := &snapshotv1.VirtualMachineRestore{}
// TODO ideally use UniversalDeserializer here
err := json.Unmarshal(ar.Request.Object.Raw, vmRestore)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
var causes []metav1.StatusCause
switch ar.Request.Operation {
case admissionv1.Create:
targetField := k8sfield.NewPath("spec", "target")
if vmRestore.Spec.Target.APIGroup == nil {
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueNotFound,
Message: "missing apiGroup",
Field: targetField.Child("apiGroup").String(),
},
}
} else {
switch *vmRestore.Spec.Target.APIGroup {
case core.GroupName:
switch vmRestore.Spec.Target.Kind {
case "VirtualMachine":
causes, err = admitter.validateTargetVM(ctx, k8sfield.NewPath("spec"), vmRestore)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
newCauses := admitter.validateVolumeOverrides(ctx, vmRestore)
if newCauses != nil {
causes = append(causes, newCauses...)
}
newCauses = admitter.validateVolumeRestorePolicy(ctx, vmRestore)
if newCauses != nil {
causes = append(causes, newCauses...)
}
newCauses = admitter.validateVolumeOwnershipPolicy(ctx, vmRestore)
if newCauses != nil {
causes = append(causes, newCauses...)
}
default:
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "invalid kind",
Field: targetField.Child("kind").String(),
},
}
}
default:
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "invalid apiGroup",
Field: targetField.Child("apiGroup").String(),
},
}
}
}
objects, err := admitter.VMRestoreInformer.GetIndexer().ByIndex(cache.NamespaceIndex, ar.Request.Namespace)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
for _, obj := range objects {
r := obj.(*snapshotv1.VirtualMachineRestore)
if equality.Semantic.DeepEqual(r.Spec.Target, vmRestore.Spec.Target) &&
(r.Status == nil || r.Status.Complete == nil || !*r.Status.Complete) {
cause := metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachineRestore %q in progress", r.Name),
Field: targetField.String(),
}
causes = append(causes, cause)
}
}
case admissionv1.Update:
prevObj := &snapshotv1.VirtualMachineRestore{}
err = json.Unmarshal(ar.Request.OldObject.Raw, prevObj)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
if !equality.Semantic.DeepEqual(prevObj.Spec, vmRestore.Spec) {
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "spec in immutable after creation",
Field: k8sfield.NewPath("spec").String(),
},
}
}
default:
return webhookutils.ToAdmissionResponseError(fmt.Errorf("unexpected operation %s", ar.Request.Operation))
}
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
reviewResponse := admissionv1.AdmissionResponse{
Allowed: true,
}
return &reviewResponse
}
func (admitter *VMRestoreAdmitter) validateTargetVM(ctx context.Context, field *k8sfield.Path, vmRestore *snapshotv1.VirtualMachineRestore) (causes []metav1.StatusCause, err error) {
targetName := vmRestore.Spec.Target.Name
namespace := vmRestore.Namespace
causes = admitter.validatePatches(vmRestore.Spec.Patches, field.Child("patches"))
vmSnapshot, err := admitter.Client.VirtualMachineSnapshot(namespace).Get(ctx, vmRestore.Spec.VirtualMachineSnapshotName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return nil, nil
}
return nil, err
}
target, err := admitter.Client.VirtualMachine(namespace).Get(ctx, targetName, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
return nil, err
}
sourceTargetVmsAreDifferent := errors.IsNotFound(err) || (vmSnapshot.Status.SourceUID != nil && target.UID != *vmSnapshot.Status.SourceUID)
if sourceTargetVmsAreDifferent {
contentName := vmSnapshot.Status.VirtualMachineSnapshotContentName
if contentName == nil {
return nil, fmt.Errorf("snapshot content name is nil in vmSnapshot status")
}
vmSnapshotContent, err := admitter.Client.VirtualMachineSnapshotContent(namespace).Get(ctx, *contentName, metav1.GetOptions{})
if err != nil {
return nil, err
}
snapshotVM := vmSnapshotContent.Spec.Source.VirtualMachine
if snapshotVM == nil {
return nil, fmt.Errorf("unexpected snapshot source")
}
if backendstorage.IsBackendStorageNeeded(snapshotVM) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Restore to a different VM is not supported when snapshotted VM has backend storage (persistent TPM or EFI)",
Field: field.String(),
})
}
}
return causes, nil
}
func (admitter *VMRestoreAdmitter) validatePatches(patches []string, field *k8sfield.Path) (causes []metav1.StatusCause) {
// Validate that patches only touch labels/annotations or elements under the "/spec/" path
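// Illustrative examples (hypothetical patches):
//   {"op": "replace", "path": "/spec/template/spec/domain/cpu/cores", "value": "2"} -> allowed
//   {"op": "add", "path": "/metadata/labels/app", "value": "web"}                   -> allowed
//   {"op": "replace", "path": "/metadata/name", "value": "other-name"}              -> rejected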
for _, patch := range patches {
for _, patchKeyValue := range strings.Split(strings.Trim(patch, "{}"), ",") {
// For example, if the original patch is {"op": "replace", "path": "/metadata/name", "value": "someValue"}
// now we're iterating on [`"op": "replace"`, `"path": "/metadata/name"`, `"value": "someValue"`]
keyValSlice := strings.Split(patchKeyValue, ":")
if len(keyValSlice) != 2 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf(`patch format is not valid - one ":" expected in a single key-value json patch: %s`, patchKeyValue),
Field: field.String(),
})
continue
}
key := strings.TrimSpace(keyValSlice[0])
value := strings.TrimSpace(keyValSlice[1])
if key == `"path"` {
if strings.HasPrefix(value, `"/metadata/labels/`) || strings.HasPrefix(value, `"/metadata/annotations/`) {
continue
}
if !strings.HasPrefix(value, `"/spec/`) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("patching is valid only for elements under /spec/ only: %s", patchKeyValue),
Field: field.String(),
})
}
}
}
}
return causes
}
func (admitter *VMRestoreAdmitter) validateVolumeOverrides(ctx context.Context, vmRestore *snapshotv1.VirtualMachineRestore) (causes []metav1.StatusCause) {
// Nothing to validate if there are no volume overrides
if vmRestore.Spec.VolumeRestoreOverrides == nil {
return nil
}
// Check each individual override
for i, override := range vmRestore.Spec.VolumeRestoreOverrides {
if override.VolumeName == "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: fmt.Sprintf("must provide a volume name"),
Field: k8sfield.NewPath("spec").
Child("volumeRestoreOverrides").
Index(i).Child("volumeName").
String(),
})
}
if override.RestoreName == "" && override.Annotations == nil && override.Labels == nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: fmt.Sprintf("must provide at least one overriden field"),
Field: k8sfield.NewPath("spec").Child("volumeRestoreOverrides").Index(i).String(),
})
}
}
return causes
}
func (admitter *VMRestoreAdmitter) validateVolumeRestorePolicy(ctx context.Context, vmRestore *snapshotv1.VirtualMachineRestore) (causes []metav1.StatusCause) {
// Nothing to validate if there's no volume restore policy
if vmRestore.Spec.VolumeRestorePolicy == nil {
return nil
}
policy := *vmRestore.Spec.VolumeRestorePolicy
// Verify the policy provided is among the ones that are allowed
switch policy {
case snapshotv1.VolumeRestorePolicyInPlace, snapshotv1.VolumeRestorePolicyRandomizeNames:
return nil
default:
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("volume restore policy \"%s\" doesn't exist", policy),
Field: k8sfield.NewPath("spec").
Child("volumeRestorePolicy").
String(),
})
}
return causes
}
func (admitter *VMRestoreAdmitter) validateVolumeOwnershipPolicy(ctx context.Context, vmRestore *snapshotv1.VirtualMachineRestore) (causes []metav1.StatusCause) {
// Nothing to validate if there's no volume ownership policy
if vmRestore.Spec.VolumeOwnershipPolicy == nil {
return nil
}
policy := *vmRestore.Spec.VolumeOwnershipPolicy
// Verify the policy provided is among the ones that are allowed
switch policy {
case snapshotv1.VolumeOwnershipPolicyVm, snapshotv1.VolumeOwnershipPolicyNone:
return nil
default:
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("volume ownership policy \"%s\" doesn't exist", policy),
Field: k8sfield.NewPath("spec").
Child("volumeOwnershipPolicy").
String(),
})
}
return causes
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"context"
"encoding/json"
"fmt"
admissionv1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
"kubevirt.io/api/core"
snapshotv1 "kubevirt.io/api/snapshot/v1beta1"
"kubevirt.io/client-go/kubecli"
webhookutils "kubevirt.io/kubevirt/pkg/util/webhooks"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
// VMSnapshotAdmitter validates VirtualMachineSnapshots
type VMSnapshotAdmitter struct {
Config *virtconfig.ClusterConfig
Client kubecli.KubevirtClient
}
// NewVMSnapshotAdmitter creates a VMSnapshotAdmitter
func NewVMSnapshotAdmitter(config *virtconfig.ClusterConfig, client kubecli.KubevirtClient) *VMSnapshotAdmitter {
return &VMSnapshotAdmitter{
Config: config,
Client: client,
}
}
// Admit validates an AdmissionReview
func (admitter *VMSnapshotAdmitter) Admit(ctx context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
if ar.Request.Resource.Group != snapshotv1.SchemeGroupVersion.Group ||
ar.Request.Resource.Resource != "virtualmachinesnapshots" {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("unexpected resource %+v", ar.Request.Resource))
}
if ar.Request.Operation == admissionv1.Create && !admitter.Config.SnapshotEnabled() {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("snapshot feature gate not enabled"))
}
vmSnapshot := &snapshotv1.VirtualMachineSnapshot{}
// TODO ideally use UniversalDeserializer here
err := json.Unmarshal(ar.Request.Object.Raw, vmSnapshot)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
var causes []metav1.StatusCause
switch ar.Request.Operation {
case admissionv1.Create:
sourceField := k8sfield.NewPath("spec", "source")
if vmSnapshot.Spec.Source.APIGroup == nil {
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueNotFound,
Message: "missing apiGroup",
Field: sourceField.Child("apiGroup").String(),
},
}
break
}
switch *vmSnapshot.Spec.Source.APIGroup {
case core.GroupName:
if vmSnapshot.Spec.Source.Kind != "VirtualMachine" {
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "invalid kind",
Field: sourceField.Child("kind").String(),
},
}
}
default:
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "invalid apiGroup",
Field: sourceField.Child("apiGroup").String(),
},
}
}
case admissionv1.Update:
prevObj := &snapshotv1.VirtualMachineSnapshot{}
err = json.Unmarshal(ar.Request.OldObject.Raw, prevObj)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
if !equality.Semantic.DeepEqual(prevObj.Spec, vmSnapshot.Spec) {
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "spec in immutable after creation",
Field: k8sfield.NewPath("spec").String(),
},
}
}
default:
return webhookutils.ToAdmissionResponseError(fmt.Errorf("unexpected operation %s", ar.Request.Operation))
}
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
reviewResponse := admissionv1.AdmissionResponse{
Allowed: true,
}
return &reviewResponse
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package backendstorage
import (
"context"
"fmt"
"strings"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
corev1 "kubevirt.io/api/core/v1"
snapshotv1 "kubevirt.io/api/snapshot/v1beta1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/controller"
"kubevirt.io/kubevirt/pkg/pointer"
"kubevirt.io/kubevirt/pkg/storage/cbt"
"kubevirt.io/kubevirt/pkg/tpm"
"kubevirt.io/kubevirt/pkg/util"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
const (
PVCPrefix = "persistent-state-for"
PVCSize = "10Mi"
// LabelApplyStorageProfile is a label used by the CDI mutating webhook
// to modify the PVC according to the storage profile.
LabelApplyStorageProfile = "cdi.kubevirt.io/applyStorageProfile"
)
func basePVC(vmi *corev1.VirtualMachineInstance) string {
return PVCPrefix + "-" + vmi.Name
}
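// PVCForVMI returns the backend-storage PVC for a VMI: a PVC carrying the
// PVCPrefix label with the VMI's name wins, while a PVC named exactly
// basePVC(vmi) is kept as the legacy fallback.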
func PVCForVMI(pvcStore cache.Store, vmi *corev1.VirtualMachineInstance) *v1.PersistentVolumeClaim {
var legacyPVC *v1.PersistentVolumeClaim
objs := pvcStore.List()
for _, obj := range objs {
pvc := obj.(*v1.PersistentVolumeClaim)
if pvc.Namespace != vmi.Namespace {
continue
}
if pvc.DeletionTimestamp != nil {
continue
}
vmName, found := pvc.Labels[PVCPrefix]
if found && vmName == vmi.Name {
return pvc
}
if pvc.Name == basePVC(vmi) {
legacyPVC = pvc
}
}
return legacyPVC
}
func pvcForMigrationTargetFromStore(pvcStore cache.Store, migration *corev1.VirtualMachineInstanceMigration) *v1.PersistentVolumeClaim {
objs := pvcStore.List()
for _, obj := range objs {
pvc := obj.(*v1.PersistentVolumeClaim)
if pvc.Namespace != migration.Namespace {
continue
}
migrationName, found := pvc.Labels[corev1.MigrationNameLabel]
if found && migrationName == migration.Name {
return pvc
}
}
return nil
}
func PVCForMigrationTarget(pvcStore cache.Store, migration *corev1.VirtualMachineInstanceMigration) *v1.PersistentVolumeClaim {
if migration.Status.MigrationState != nil && migration.Status.MigrationState.TargetPersistentStatePVCName != "" {
key := controller.NamespacedKey(migration.Namespace, migration.Status.MigrationState.TargetPersistentStatePVCName)
obj, exists, err := pvcStore.GetByKey(key)
if err != nil || !exists {
return nil
}
return obj.(*v1.PersistentVolumeClaim)
}
return pvcForMigrationTargetFromStore(pvcStore, migration)
}
func RecoverFromBrokenMigration(client kubecli.KubevirtClient, migration *corev1.VirtualMachineInstanceMigration, pvcStore cache.Store, vmi *corev1.VirtualMachineInstance, launcherImage string) error {
if migration.Status.MigrationState == nil ||
migration.Status.MigrationState.TargetPersistentStatePVCName == migration.Status.MigrationState.SourcePersistentStatePVCName {
// The migration either didn't actually start, or the backend storage is RWX.
// In both cases we consider the migration as failed.
migration.Status.Phase = corev1.MigrationFailed
return nil
}
// An interrupted migration exists. Use a job to check whether the source PVC contains /meta/migrated,
// which would indicate that the libvirt migration finished.
// A JobComplete condition indicates the file is present: the migration was successful and the target PVC prevails.
// A JobFailed condition indicates the file is absent: the migration didn't finish and the source PVC prevails.
jobName := "recover-" + migration.Name
job, err := client.BatchV1().Jobs(vmi.Namespace).Get(context.Background(), jobName, metav1.GetOptions{})
if err != nil {
if !k8serrors.IsNotFound(err) {
return err
}
job = buildRecoveryJob(jobName, launcherImage, migration)
job, err = client.BatchV1().Jobs(vmi.Namespace).Create(context.Background(), job, metav1.CreateOptions{})
if err != nil {
return err
}
// The job was just created; return an error so the migration gets re-enqueued and the job checked again
return fmt.Errorf("a migration recovery had to be initiated")
}
for _, c := range job.Status.Conditions {
switch c.Type {
case batchv1.JobComplete:
if c.Status == v1.ConditionTrue {
err = MigrationHandoff(client, pvcStore, migration)
if err == nil {
migration.Status.Phase = corev1.MigrationSucceeded
}
return err
}
case batchv1.JobFailed:
if c.Status == v1.ConditionTrue {
if c.Reason == batchv1.JobReasonPodFailurePolicy {
// The job ran properly but didn't find /meta/migrated, meaning the migration failed
err = MigrationAbort(client, migration)
if err == nil {
migration.Status.Phase = corev1.MigrationFailed
}
return err
} else {
// The job failed to run properly. Deleting it to retry asap.
// Ignoring the deletion error because the job may already be gone, or will get auto-removed anyway.
_ = client.BatchV1().Jobs(job.Namespace).Delete(context.Background(), job.Name, metav1.DeleteOptions{
PropagationPolicy: pointer.P(metav1.DeletePropagationBackground),
})
return fmt.Errorf("%s", c.Message)
}
}
}
}
return fmt.Errorf("migration recovery job still running")
}
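// buildRecoveryJob creates a short-lived job that mounts the source
// backend-storage PVC and runs "ls /meta/migrated || exit 42". Exit code 42
// matches the pod failure policy below and fails the job, so JobFailed with
// reason PodFailurePolicy means the marker file is absent, while JobComplete
// means the file exists and the libvirt migration finished.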
func buildRecoveryJob(jobName, launcherImage string, migration *corev1.VirtualMachineInstanceMigration) *batchv1.Job {
return &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: jobName,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(migration, corev1.VirtualMachineInstanceMigrationGroupVersionKind),
},
},
Spec: batchv1.JobSpec{
ActiveDeadlineSeconds: pointer.P(int64(30)),
BackoffLimit: pointer.P(int32(0)),
TTLSecondsAfterFinished: pointer.P(int32(30)),
PodFailurePolicy: &batchv1.PodFailurePolicy{
Rules: []batchv1.PodFailurePolicyRule{{
Action: batchv1.PodFailurePolicyActionFailJob,
OnExitCodes: &batchv1.PodFailurePolicyOnExitCodesRequirement{
ContainerName: pointer.P("container"),
Operator: "In",
Values: []int32{42},
},
}},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
GenerateName: jobName + "-",
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
SecurityContext: &v1.PodSecurityContext{
RunAsNonRoot: pointer.P(true),
RunAsUser: pointer.P(int64(util.NonRootUID)),
RunAsGroup: pointer.P(int64(util.NonRootUID)),
FSGroup: pointer.P(int64(util.NonRootUID)),
SeccompProfile: &v1.SeccompProfile{
Type: v1.SeccompProfileTypeRuntimeDefault,
},
},
Containers: []v1.Container{{
Name: "container",
SecurityContext: &v1.SecurityContext{
AllowPrivilegeEscalation: pointer.P(false),
Capabilities: &v1.Capabilities{Drop: []v1.Capability{"ALL"}},
},
Image: launcherImage,
Command: []string{"bash"},
Args: []string{"-c", "ls /meta/migrated || exit 42"},
VolumeMounts: []v1.VolumeMount{{
Name: "backend-storage",
MountPath: "/meta",
SubPath: "meta",
}},
}},
Volumes: []v1.Volume{{
Name: "backend-storage",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: migration.Status.MigrationState.SourcePersistentStatePVCName,
},
},
}},
},
},
},
}
}
func (bs *BackendStorage) labelLegacyPVC(pvc *v1.PersistentVolumeClaim, name string) {
labelPatch := patch.New()
if len(pvc.Labels) == 0 {
labelPatch.AddOption(patch.WithAdd("/metadata/labels", map[string]string{PVCPrefix: name}))
} else {
labelPatch.AddOption(patch.WithReplace("/metadata/labels/"+PVCPrefix, name))
}
labelPatchPayload, err := labelPatch.GeneratePayload()
if err == nil {
_, err = bs.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Patch(context.Background(), pvc.Name, types.JSONPatchType, labelPatchPayload, metav1.PatchOptions{})
if err != nil {
log.Log.Reason(err).Warningf("failed to label legacy PVC %s/%s", pvc.Namespace, pvc.Name)
}
}
}
func CurrentPVCName(vmi *corev1.VirtualMachineInstance) string {
for _, volume := range vmi.Status.VolumeStatus {
if strings.Contains(volume.Name, basePVC(vmi)) {
return volume.PersistentVolumeClaimInfo.ClaimName
}
}
return ""
}
func HasPersistentEFI(vmiSpec *corev1.VirtualMachineInstanceSpec) bool {
return vmiSpec.Domain.Firmware != nil &&
vmiSpec.Domain.Firmware.Bootloader != nil &&
vmiSpec.Domain.Firmware.Bootloader.EFI != nil &&
vmiSpec.Domain.Firmware.Bootloader.EFI.Persistent != nil &&
*vmiSpec.Domain.Firmware.Bootloader.EFI.Persistent
}
func IsBackendStorageNeeded(obj interface{}) bool {
switch obj := obj.(type) {
case *corev1.VirtualMachine:
if obj.Spec.Template == nil {
return false
}
return tpm.HasPersistentDevice(&obj.Spec.Template.Spec) ||
HasPersistentEFI(&obj.Spec.Template.Spec) ||
cbt.HasCBTStateEnabled(obj.Status.ChangedBlockTracking)
case *snapshotv1.VirtualMachine:
if obj.Spec.Template == nil {
return false
}
// CBT alone doesn't require backend storage restoration for snapshot VMs
return tpm.HasPersistentDevice(&obj.Spec.Template.Spec) ||
HasPersistentEFI(&obj.Spec.Template.Spec)
case *corev1.VirtualMachineInstance:
return tpm.HasPersistentDevice(&obj.Spec) ||
HasPersistentEFI(&obj.Spec) ||
cbt.HasCBTStateEnabled(obj.Status.ChangedBlockTracking)
default:
log.Log.Errorf("unsupported object type: %T", obj)
return false
}
}
// MigrationHandoff runs at the end of a successful live migration.
// It labels the target backend-storage PVC as current for the VM and deletes the source backend-storage PVC.
func MigrationHandoff(client kubecli.KubevirtClient, pvcStore cache.Store, migration *corev1.VirtualMachineInstanceMigration) error {
if migration == nil || migration.Status.MigrationState == nil ||
(migration.Status.MigrationState.SourcePersistentStatePVCName == "" && !migration.IsDecentralized()) ||
migration.Status.MigrationState.TargetPersistentStatePVCName == "" {
return fmt.Errorf("missing source and/or target PVC name(s)")
}
sourcePVC := migration.Status.MigrationState.SourcePersistentStatePVCName
targetPVC := migration.Status.MigrationState.TargetPersistentStatePVCName
if sourcePVC == targetPVC {
// RWX backend-storage, nothing to do
return nil
}
// Let's label the target first, then remove the source.
// The target might already be labelled if this function was already called for this migration
target := pvcForMigrationTargetFromStore(pvcStore, migration)
if target == nil {
return fmt.Errorf("target PVC not found for migration %s/%s", migration.Namespace, migration.Name)
}
labels := target.Labels
if labels == nil {
labels = make(map[string]string)
}
existing, ok := labels[PVCPrefix]
if ok && existing != migration.Spec.VMIName {
return fmt.Errorf("target PVC for %s is already labelled for another VMI: %s", migration.Spec.VMIName, existing)
}
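// corev1.MigrationNameLabel contains a "/", which must be escaped per RFC 6901
// (EscapeJSONPointer rewrites "/" as "~1") before the label key can be used as
// a single segment in the JSON patch paths below.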
if _, migrationLabelExists := target.Labels[corev1.MigrationNameLabel]; migrationLabelExists {
labelPatchPayload, err := patch.New(
patch.WithReplace("/metadata/labels/"+PVCPrefix, migration.Spec.VMIName),
patch.WithTest("/metadata/labels/"+patch.EscapeJSONPointer(corev1.MigrationNameLabel), migration.Name),
patch.WithRemove("/metadata/labels/"+patch.EscapeJSONPointer(corev1.MigrationNameLabel)),
).GeneratePayload()
if err != nil {
return fmt.Errorf("failed to generate PVC patch: %v", err)
}
_, err = client.CoreV1().PersistentVolumeClaims(migration.Namespace).Patch(context.Background(), targetPVC, types.JSONPatchType, labelPatchPayload, metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("failed to patch PVC: %v", err)
}
}
if sourcePVC != "" {
err := client.CoreV1().PersistentVolumeClaims(migration.Namespace).Delete(context.Background(), sourcePVC, metav1.DeleteOptions{})
if err != nil && !k8serrors.IsNotFound(err) {
return fmt.Errorf("failed to delete PVC: %v", err)
}
}
return nil
}
// MigrationAbort runs at the end of a failed live migration.
// It just removes the target backend-storage PVC.
func MigrationAbort(client kubecli.KubevirtClient, migration *corev1.VirtualMachineInstanceMigration) error {
if migration == nil || migration.Status.MigrationState == nil ||
migration.Status.MigrationState.TargetPersistentStatePVCName == "" {
return nil
}
sourcePVC := migration.Status.MigrationState.SourcePersistentStatePVCName
targetPVC := migration.Status.MigrationState.TargetPersistentStatePVCName
if sourcePVC == targetPVC {
// RWX backend-storage, nothing to delete
return nil
}
err := client.CoreV1().PersistentVolumeClaims(migration.Namespace).Delete(context.Background(), targetPVC, metav1.DeleteOptions{})
if err != nil && !k8serrors.IsNotFound(err) {
return fmt.Errorf("failed to delete PVC: %v", err)
}
return nil
}
type BackendStorage struct {
client kubecli.KubevirtClient
clusterConfig *virtconfig.ClusterConfig
scStore cache.Store
spStore cache.Store
pvcStore cache.Store
}
func NewBackendStorage(client kubecli.KubevirtClient, clusterConfig *virtconfig.ClusterConfig, scStore cache.Store, spStore cache.Store, pvcStore cache.Store) *BackendStorage {
return &BackendStorage{
client: client,
clusterConfig: clusterConfig,
scStore: scStore,
spStore: spStore,
pvcStore: pvcStore,
}
}
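// getStorageClass resolves the storage class for backend-storage PVCs: an
// explicitly configured VMStateStorageClass wins, then a class annotated as
// the KubeVirt default ("storageclass.kubevirt.io/is-default-virt-class"),
// then the cluster default ("storageclass.kubernetes.io/is-default-class").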
func (bs *BackendStorage) getStorageClass() (string, error) {
storageClass := bs.clusterConfig.GetVMStateStorageClass()
if storageClass != "" {
return storageClass, nil
}
k8sDefault := ""
kvDefault := ""
for _, obj := range bs.scStore.List() {
sc := obj.(*storagev1.StorageClass)
if sc.Annotations["storageclass.kubevirt.io/is-default-virt-class"] == "true" {
kvDefault = sc.Name
}
if sc.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" {
k8sDefault = sc.Name
}
}
if kvDefault != "" {
return kvDefault, nil
} else if k8sDefault != "" {
return k8sDefault, nil
} else {
return "", fmt.Errorf("no default storage class found")
}
}
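// getAccessMode picks an access mode for the backend-storage PVC. If the
// storage profile for the class advertises ReadWriteMany for the requested
// volume mode, RWX wins; if it only advertises ReadWriteOnce, RWO is used;
// without usable profile data the default applies (RWX for an explicitly
// configured storage class, RWO for the cluster default).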
func (bs *BackendStorage) getAccessMode(storageClass string, mode v1.PersistentVolumeMode) v1.PersistentVolumeAccessMode {
// The default access mode should be RWX if the storage class was manually specified.
// However, if we're using the cluster default storage class, default to access mode RWO.
accessMode := v1.ReadWriteMany
if bs.clusterConfig.GetVMStateStorageClass() == "" {
accessMode = v1.ReadWriteOnce
}
// Storage profiles are guaranteed to have the same name as their storage class
obj, exists, err := bs.spStore.GetByKey(storageClass)
if err != nil {
log.Log.Reason(err).Infof("couldn't access storage profiles, defaulting to %s", accessMode)
return accessMode
}
if !exists {
log.Log.Infof("no storage profile found for %s, defaulting to %s", storageClass, accessMode)
return accessMode
}
storageProfile := obj.(*cdiv1.StorageProfile)
if len(storageProfile.Status.ClaimPropertySets) == 0 {
log.Log.Infof("no ClaimPropertySets in storage profile %s, defaulting to %s", storageProfile.Name, accessMode)
return accessMode
}
foundRWO := false
for _, property := range storageProfile.Status.ClaimPropertySets {
if property.VolumeMode == nil || *property.VolumeMode != mode || property.AccessModes == nil {
continue
}
for _, accessMode := range property.AccessModes {
switch accessMode {
case v1.ReadWriteMany:
return v1.ReadWriteMany
case v1.ReadWriteOnce:
foundRWO = true
}
}
}
if foundRWO {
return v1.ReadWriteOnce
}
return accessMode
}
func (bs *BackendStorage) UpdateVolumeStatus(vmi *corev1.VirtualMachineInstance, pvc *v1.PersistentVolumeClaim) {
if vmi.Status.VolumeStatus == nil {
vmi.Status.VolumeStatus = []corev1.VolumeStatus{}
}
for i := range vmi.Status.VolumeStatus {
if vmi.Status.VolumeStatus[i].Name == pvc.Name {
if vmi.Status.VolumeStatus[i].PersistentVolumeClaimInfo == nil {
vmi.Status.VolumeStatus[i].PersistentVolumeClaimInfo = &corev1.PersistentVolumeClaimInfo{}
}
vmi.Status.VolumeStatus[i].PersistentVolumeClaimInfo.ClaimName = pvc.Name
vmi.Status.VolumeStatus[i].PersistentVolumeClaimInfo.AccessModes = pvc.Spec.AccessModes
return
}
}
vmi.Status.VolumeStatus = append(vmi.Status.VolumeStatus, corev1.VolumeStatus{
Name: pvc.Name,
PersistentVolumeClaimInfo: &corev1.PersistentVolumeClaimInfo{
ClaimName: pvc.Name,
AccessModes: pvc.Spec.AccessModes,
},
})
}
func (bs *BackendStorage) createPVC(vmi *corev1.VirtualMachineInstance, labels map[string]string) (*v1.PersistentVolumeClaim, error) {
storageClass, err := bs.getStorageClass()
if err != nil {
return nil, err
}
mode := v1.PersistentVolumeFilesystem
accessMode := bs.getAccessMode(storageClass, mode)
ownerReferences := vmi.OwnerReferences
if len(vmi.OwnerReferences) == 0 {
// If the VMI has no owner, then it did not originate from a VM.
// In that case, we tie the PVC to the VMI, rendering it quite useless since it won't actually persist.
// The alternative is to remove this `if` block, allowing the PVC to persist after the VMI is deleted.
// However, that would pose security and littering concerns.
ownerReferences = []metav1.OwnerReference{
*metav1.NewControllerRef(vmi, corev1.VirtualMachineInstanceGroupVersionKind),
}
}
// Adding this label to allow the PVC to be processed by the CDI WebhookPvcRendering mutating webhook,
// which must be enabled in the CDI CR via feature gate.
// This mutating webhook processes the PVC based on its associated StorageProfile.
// For example, a profile can define a minimum supported volume size via the annotation:
// cdi.kubevirt.io/minimumSupportedPvcSize: 4Gi
// This helps avoid issues with provisioners that reject the hardcoded 10Mi PVC size used here.
labels[LabelApplyStorageProfile] = "true"
pvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: basePVC(vmi) + "-",
OwnerReferences: ownerReferences,
Labels: labels,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{accessMode},
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse(PVCSize)},
},
StorageClassName: &storageClass,
VolumeMode: &mode,
},
}
pvc, err = bs.client.CoreV1().PersistentVolumeClaims(vmi.Namespace).Create(context.Background(), pvc, metav1.CreateOptions{})
if err != nil {
return nil, err
}
return pvc, nil
}
func (bs *BackendStorage) DeletePVCForVMI(vmi *corev1.VirtualMachineInstance, pvcName string) error {
return bs.client.CoreV1().PersistentVolumeClaims(vmi.Namespace).Delete(context.Background(), pvcName, metav1.DeleteOptions{})
}
func (bs *BackendStorage) CreatePVCForVMI(vmi *corev1.VirtualMachineInstance) (*v1.PersistentVolumeClaim, error) {
pvc := PVCForVMI(bs.pvcStore, vmi)
if pvc == nil {
return bs.createPVC(vmi, map[string]string{PVCPrefix: vmi.Name})
}
if _, exists := pvc.Labels[PVCPrefix]; !exists {
bs.labelLegacyPVC(pvc, vmi.Name)
}
return pvc, nil
}
func (bs *BackendStorage) CreatePVCForMigrationTarget(vmi *corev1.VirtualMachineInstance, migrationName string) (*v1.PersistentVolumeClaim, error) {
pvc := PVCForVMI(bs.pvcStore, vmi)
if pvc != nil {
if len(pvc.Status.AccessModes) > 0 && pvc.Status.AccessModes[0] == v1.ReadWriteMany {
// The source PVC is RWX, so it can be used for the target too
return pvc, nil
}
}
return bs.createPVC(vmi, map[string]string{corev1.MigrationNameLabel: migrationName})
}
// IsPVCReady returns true if either:
// - No PVC is needed for the VMI since it doesn't use backend storage
// - The backend storage PVC is bound
// - The backend storage PVC is pending and uses a WaitForFirstConsumer storage class
func (bs *BackendStorage) IsPVCReady(vmi *corev1.VirtualMachineInstance, pvcName string) (bool, error) {
if !IsBackendStorageNeeded(vmi) {
return true, nil
}
obj, exists, err := bs.pvcStore.GetByKey(controller.NamespacedKey(vmi.Namespace, pvcName))
if err != nil {
return false, err
}
if !exists {
return false, fmt.Errorf("pvc %s not found in namespace %s", pvcName, vmi.Namespace)
}
pvc := obj.(*v1.PersistentVolumeClaim)
switch pvc.Status.Phase {
case v1.ClaimBound:
return true, nil
case v1.ClaimLost:
return false, fmt.Errorf("backend storage PVC lost")
case v1.ClaimPending:
if pvc.Spec.StorageClassName == nil {
return false, fmt.Errorf("no storage class name")
}
obj, exists, err := bs.scStore.GetByKey(*pvc.Spec.StorageClassName)
if err != nil {
return false, err
}
if !exists {
return false, fmt.Errorf("storage class %s not found", *pvc.Spec.StorageClassName)
}
sc := obj.(*storagev1.StorageClass)
if sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
return true, nil
}
}
return false, nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package cbt
import (
"context"
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
backupv1 "kubevirt.io/api/backup/v1alpha1"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/controller"
hotplugdisk "kubevirt.io/kubevirt/pkg/hotplug-disk"
"kubevirt.io/kubevirt/pkg/pointer"
)
const (
vmBackupFinalizer = "backup.kubevirt.io/vmbackup-protection"
backupInitializingEvent = "VirtualMachineBackupInitializing"
backupInitiatedEvent = "VirtualMachineBackupInitiated"
backupCompletedEvent = "VirtualMachineBackupCompletedSuccessfully"
backupCompletedWithWarningEvent = "VirtualMachineBackupCompletedWithWarning"
backupInitializing = "Backup is initializing"
backupInProgress = "Backup is in progress"
backupDeleting = "Backup is deleting"
backupCompleted = "Successfully completed VirtualMachineBackup"
backupCompletedWithWarningMsg = "Completed VirtualMachineBackup, warning: %s"
vmNotFoundMsg = "VM %s/%s doesnt exist"
vmNotRunningMsg = "vm %s is not running, can not do backup"
vmNoVolumesToBackupMsg = "vm %s has no volumes to backup"
vmNoChangedBlockTrackingMsg = "vm %s has no ChangedBlockTracking, cannot start backup"
backupTrackerNotFoundMsg = "BackupTracker %s does not exist"
invalidBackupModeMsg = "invalid backup mode: %s"
backupSourceNameEmptyMsg = "Source name is empty"
backupDeletingMsg = "Backup is being deleted"
backupDeletingBeforeVMICompletionMsg = "Backup is being deleted before VMI completion, waiting for completion"
)
var (
errSourceNameEmpty = fmt.Errorf("source name is empty")
)
type VMBackupController struct {
client kubecli.KubevirtClient
backupInformer cache.SharedIndexInformer
backupTrackerInformer cache.SharedIndexInformer
vmStore cache.Store
vmiStore cache.Store
pvcStore cache.Store
recorder record.EventRecorder
backupQueue workqueue.TypedRateLimitingInterface[string]
hasSynced func() bool
}
func NewVMBackupController(client kubecli.KubevirtClient,
backupInformer cache.SharedIndexInformer,
backupTrackerInformer cache.SharedIndexInformer,
vmInformer cache.SharedIndexInformer,
vmiInformer cache.SharedIndexInformer,
pvcInformer cache.SharedIndexInformer,
recorder record.EventRecorder,
) (*VMBackupController, error) {
c := &VMBackupController{
backupQueue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-controller-vmbackup"},
),
backupInformer: backupInformer,
backupTrackerInformer: backupTrackerInformer,
vmStore: vmInformer.GetStore(),
vmiStore: vmiInformer.GetStore(),
pvcStore: pvcInformer.GetStore(),
recorder: recorder,
client: client,
}
c.hasSynced = func() bool {
return backupInformer.HasSynced() && backupTrackerInformer.HasSynced() && vmInformer.HasSynced() && vmiInformer.HasSynced() && pvcInformer.HasSynced()
}
_, err := backupInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: c.handleBackup,
UpdateFunc: func(oldObj, newObj interface{}) { c.handleBackup(newObj) },
DeleteFunc: c.handleBackup,
},
)
if err != nil {
return nil, err
}
_, err = vmiInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
UpdateFunc: c.handleUpdateVMI,
},
)
if err != nil {
return nil, err
}
_, err = backupTrackerInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: c.handleBackupTracker,
UpdateFunc: func(oldObj, newObj interface{}) { c.handleBackupTracker(newObj) },
},
)
if err != nil {
return nil, err
}
return c, nil
}
func (ctrl *VMBackupController) handleBackup(obj interface{}) {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
if backup, ok := obj.(*backupv1.VirtualMachineBackup); ok {
objName, err := cache.DeletionHandlingMetaNamespaceKeyFunc(backup)
if err != nil {
log.Log.Errorf("failed to get key from object: %v, %v", err, backup)
return
}
log.Log.V(3).Infof("enqueued %q for sync", objName)
ctrl.backupQueue.Add(objName)
}
}
func cacheKeyFunc(namespace, name string) string {
return fmt.Sprintf("%s/%s", namespace, name)
}
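// handleUpdateVMI requeues every backup affected by a VMI status change:
// backups referencing the VMI directly, plus backups referencing a
// BackupTracker that in turn references the VMI.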
func (ctrl *VMBackupController) handleUpdateVMI(oldObj, newObj interface{}) {
ovmi, ok := oldObj.(*v1.VirtualMachineInstance)
if !ok {
return
}
nvmi, ok := newObj.(*v1.VirtualMachineInstance)
if !ok {
return
}
if equality.Semantic.DeepEqual(ovmi.Status, nvmi.Status) {
return
}
key := cacheKeyFunc(nvmi.Namespace, nvmi.Name)
// Find backups directly referencing this VMI
keys, err := ctrl.backupInformer.GetIndexer().IndexKeys("vmi", key)
if err != nil {
return
}
for _, key := range keys {
ctrl.backupQueue.Add(key)
}
// Find backups referencing this VMI via BackupTracker
// First find all trackers that reference this VMI
trackerKeys, err := ctrl.backupTrackerInformer.GetIndexer().IndexKeys("vmi", key)
if err != nil {
return
}
// For each tracker, find all backups that reference it
for _, trackerKey := range trackerKeys {
backupKeys, err := ctrl.backupInformer.GetIndexer().IndexKeys("backupTracker", trackerKey)
if err != nil {
continue
}
for _, backupKey := range backupKeys {
ctrl.backupQueue.Add(backupKey)
}
}
}
func (ctrl *VMBackupController) handleBackupTracker(obj interface{}) {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
tracker, ok := obj.(*backupv1.VirtualMachineBackupTracker)
if !ok {
return
}
key := cacheKeyFunc(tracker.Namespace, tracker.Name)
backupKeys, err := ctrl.backupInformer.GetIndexer().IndexKeys("backupTracker", key)
if err != nil {
return
}
for _, key := range backupKeys {
ctrl.backupQueue.Add(key)
}
}
func (ctrl *VMBackupController) Run(threadiness int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer ctrl.backupQueue.ShutDown()
log.Log.Info("Starting backup controller.")
defer log.Log.Info("Shutting down backup controller.")
if !cache.WaitForCacheSync(
stopCh,
ctrl.hasSynced,
) {
return fmt.Errorf("failed to wait for caches to sync")
}
for range threadiness {
go wait.Until(ctrl.runWorker, time.Second, stopCh)
}
<-stopCh
return nil
}
func (ctrl *VMBackupController) runWorker() {
for ctrl.Execute() {
}
}
func (ctrl *VMBackupController) Execute() bool {
key, quit := ctrl.backupQueue.Get()
if quit {
return false
}
defer ctrl.backupQueue.Done(key)
err := ctrl.execute(key)
if err != nil {
log.Log.Reason(err).Infof("reenqueuing VirtualMachineBackup %v", key)
ctrl.backupQueue.AddRateLimited(key)
} else {
log.Log.V(4).Infof("processed VirtualMachineBackup %v", key)
ctrl.backupQueue.Forget(key)
}
return true
}
type SyncInfo struct {
err error
reason string
event string
checkpointName string
backupType backupv1.BackupType
}
func syncInfoError(err error) *SyncInfo {
return &SyncInfo{err: err}
}
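// isIncrementalBackup reports whether the backup can be taken incrementally:
// a full backup was not forced and the tracker already records a latest
// checkpoint to use as the incremental base.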
func isIncrementalBackup(backup *backupv1.VirtualMachineBackup, backupTracker *backupv1.VirtualMachineBackupTracker) bool {
return !backup.Spec.ForceFullBackup &&
backupTracker != nil && backupTracker.Status != nil &&
backupTracker.Status.LatestCheckpoint != nil &&
backupTracker.Status.LatestCheckpoint.Name != ""
}
func (ctrl *VMBackupController) execute(key string) error {
logger := log.Log.With("VirtualMachineBackup", key)
logger.V(3).Infof("Processing VirtualMachineBackup %s", key)
storeObj, exists, err := ctrl.backupInformer.GetStore().GetByKey(key)
if err != nil {
logger.Errorf("Error getting backup from store: %v", err)
return err
}
if !exists {
logger.V(3).Infof("Backup %s no longer exists in store", key)
return nil
}
backup, ok := storeObj.(*backupv1.VirtualMachineBackup)
if !ok {
logger.Errorf("Unexpected resource type: %T", storeObj)
return fmt.Errorf("unexpected resource %+v", storeObj)
}
syncInfo := ctrl.sync(backup)
if syncInfo != nil && syncInfo.err != nil {
return syncInfo.err
}
err = ctrl.updateStatus(backup, syncInfo, logger)
if err != nil {
logger.Reason(err).Errorf("Updating the VirtualMachineBackup status failed")
return err
}
logger.V(4).Infof("Successfully processed backup %s", key)
return nil
}
func (ctrl *VMBackupController) sync(backup *backupv1.VirtualMachineBackup) *SyncInfo {
logger := log.Log.With("VirtualMachineBackup", backup.Name)
// If backup is done and not being deleted, nothing to do
if IsBackupDone(backup.Status) && !isBackupDeleting(backup) {
logger.V(4).Info("Backup is already done, skipping reconciliation")
return nil
}
backupTracker, syncInfo := ctrl.getBackupTracker(backup)
if syncInfo != nil {
return syncInfo
}
sourceName := getSourceName(backup, backupTracker)
if sourceName == "" {
logger.Error(backupSourceNameEmptyMsg)
return syncInfoError(errSourceNameEmpty)
}
if isBackupDeleting(backup) {
logger.V(3).Info(backupDeletingMsg)
return ctrl.deletionCleanup(backup, sourceName)
}
vmi, syncInfo := ctrl.verifyBackupSource(backup, sourceName)
if syncInfo != nil {
return syncInfo
}
if !isBackupInitializing(backup.Status) || vmi == nil {
return ctrl.checkBackupCompletion(backup, vmi, backupTracker)
}
backup, err := ctrl.addBackupFinalizer(backup)
if err != nil {
err = fmt.Errorf("failed to add finalizer: %w", err)
logger.Error(err.Error())
return syncInfoError(err)
}
if err = ctrl.updateSourceBackupInProgress(vmi, backup.Name); err != nil {
err = fmt.Errorf("failed to update source backup in progress: %w", err)
logger.Error(err.Error())
return syncInfoError(err)
}
backupOptions := backupv1.BackupOptions{
BackupName: backup.Name,
Cmd: backupv1.Start,
BackupStartTime: &backup.CreationTimestamp,
SkipQuiesce: backup.Spec.SkipQuiesce,
}
// Default the mode locally without mutating the (possibly cached) backup object.
mode := backupv1.PushMode
if backup.Spec.Mode != nil {
mode = *backup.Spec.Mode
}
switch mode {
case backupv1.PushMode:
pvcName := backup.Spec.PvcName
syncInfo = ctrl.verifyBackupTargetPVC(pvcName, backup.Namespace)
if syncInfo != nil {
return syncInfo
}
volumeName := backupTargetVolumeName(backup.Name)
attached := ctrl.backupTargetPVCAttached(vmi, volumeName)
if !attached {
return ctrl.attachBackupTargetPVC(vmi, *pvcName, volumeName)
}
backupOptions.Mode = backupv1.PushMode
backupOptions.PushPath = pointer.P(hotplugdisk.GetVolumeMountDir(volumeName))
default:
logger.Errorf(invalidBackupModeMsg, mode)
return syncInfoError(fmt.Errorf(invalidBackupModeMsg, mode))
}
logger.Infof("Starting backup for VMI %s with mode %s", vmi.Name, backupOptions.Mode)
backupType := backupv1.Full
if isIncrementalBackup(backup, backupTracker) {
backupOptions.Incremental = pointer.P(backupTracker.Status.LatestCheckpoint.Name)
backupType = backupv1.Incremental
logger.Infof("Setting incremental backup from checkpoint: %s", backupTracker.Status.LatestCheckpoint.Name)
}
err = ctrl.client.VirtualMachineInstance(vmi.Namespace).Backup(context.Background(), vmi.Name, &backupOptions)
if err != nil {
err = fmt.Errorf("failed to send Start backup command: %w", err)
logger.Error(err.Error())
return syncInfoError(err)
}
logger.Infof("Started backup for VMI %s successfully", vmi.Name)
return &SyncInfo{
event: backupInitiatedEvent,
reason: backupInProgress,
backupType: backupType,
}
}
func (ctrl *VMBackupController) updateStatus(backup *backupv1.VirtualMachineBackup, syncInfo *SyncInfo, logger *log.FilteredLogger) error {
backupOut := backup.DeepCopy()
if backup.Status == nil {
backupOut.Status = &backupv1.VirtualMachineBackupStatus{}
updateBackupCondition(backupOut, newInitializingCondition(corev1.ConditionTrue, backupInitializing))
updateBackupCondition(backupOut, newProgressingCondition(corev1.ConditionFalse, backupInitializing))
}
if syncInfo != nil {
// TODO: Handle failure and abort events (backupFailedEvent, backupAbortedEvent)
switch syncInfo.event {
case backupInitializingEvent:
updateBackupCondition(backupOut, newInitializingCondition(corev1.ConditionTrue, syncInfo.reason))
updateBackupCondition(backupOut, newProgressingCondition(corev1.ConditionFalse, syncInfo.reason))
case backupInitiatedEvent:
removeBackupCondition(backupOut, backupv1.ConditionInitializing)
updateBackupCondition(backupOut, newProgressingCondition(corev1.ConditionTrue, syncInfo.reason))
updateBackupCondition(backupOut, newDoneCondition(corev1.ConditionFalse, syncInfo.reason))
if syncInfo.backupType != "" {
backupOut.Status.Type = syncInfo.backupType
}
case backupCompletedEvent, backupCompletedWithWarningEvent:
if syncInfo.event == backupCompletedWithWarningEvent {
ctrl.recorder.Eventf(backupOut, corev1.EventTypeWarning, backupCompletedWithWarningEvent, "%s", syncInfo.reason)
} else {
ctrl.recorder.Eventf(backupOut, corev1.EventTypeNormal, backupCompletedEvent, "%s", syncInfo.reason)
}
updateBackupCondition(backupOut, newProgressingCondition(corev1.ConditionFalse, syncInfo.reason))
updateBackupCondition(backupOut, newDoneCondition(corev1.ConditionTrue, syncInfo.reason))
if syncInfo.checkpointName != "" {
backupOut.Status.CheckpointName = pointer.P(syncInfo.checkpointName)
}
}
}
if isBackupDeleting(backupOut) && controller.HasFinalizer(backupOut, vmBackupFinalizer) {
logger.Info("update backup is deleting")
updateBackupCondition(backupOut, newDeletingCondition(corev1.ConditionTrue, backupDeleting))
}
if !equality.Semantic.DeepEqual(backup.Status, backupOut.Status) {
if _, err := ctrl.client.VirtualMachineBackup(backupOut.Namespace).UpdateStatus(context.Background(), backupOut, metav1.UpdateOptions{}); err != nil {
logger.Reason(err).Error("failed to update backup status")
return err
}
}
return nil
}
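// generateFinalizerPatch builds a JSON patch that first tests the current
// finalizers and then replaces them, guarding against concurrent updates.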
func generateFinalizerPatch(test, replace []string) ([]byte, error) {
return patch.New(
patch.WithTest("/metadata/finalizers", test),
patch.WithReplace("/metadata/finalizers", replace),
).GeneratePayload()
}
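// addBackupFinalizer adds the backup protection finalizer to the backup, if
// not already present, via a test-and-replace JSON patch.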
func (ctrl *VMBackupController) addBackupFinalizer(backup *backupv1.VirtualMachineBackup) (*backupv1.VirtualMachineBackup, error) {
if controller.HasFinalizer(backup, vmBackupFinalizer) {
return backup, nil
}
cpy := backup.DeepCopy()
controller.AddFinalizer(cpy, vmBackupFinalizer)
patchBytes, err := generateFinalizerPatch(backup.Finalizers, cpy.Finalizers)
if err != nil {
return backup, err
}
return ctrl.client.VirtualMachineBackup(cpy.Namespace).Patch(context.Background(), cpy.Name, k8stypes.JSONPatchType, patchBytes, metav1.PatchOptions{})
}
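// removeBackupFinalizer removes the backup protection finalizer from the
// backup, if present, and reports any patch failure as a SyncInfo error.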
func (ctrl *VMBackupController) removeBackupFinalizer(backup *backupv1.VirtualMachineBackup) *SyncInfo {
if !controller.HasFinalizer(backup, vmBackupFinalizer) {
return nil
}
cpy := backup.DeepCopy()
controller.RemoveFinalizer(cpy, vmBackupFinalizer)
patchBytes, err := generateFinalizerPatch(backup.Finalizers, cpy.Finalizers)
if err != nil {
err = fmt.Errorf("failed to generate finalizer patch: %w", err)
log.Log.With("VirtualMachineBackup", backup.Name).Error(err.Error())
return syncInfoError(err)
}
_, err = ctrl.client.VirtualMachineBackup(cpy.Namespace).Patch(context.Background(), cpy.Name, k8stypes.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
err = fmt.Errorf("failed to patch backup to remove finalizer: %w", err)
log.Log.With("VirtualMachineBackup", backup.Name).Error(err.Error())
return syncInfoError(err)
}
return nil
}
func getSourceName(backup *backupv1.VirtualMachineBackup, backupTracker *backupv1.VirtualMachineBackupTracker) string {
if backupTracker != nil {
return backupTracker.Spec.Source.Name
}
return backup.Spec.Source.Name
}
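// getBackupTracker resolves the backup's source to a VirtualMachineBackupTracker
// from the informer cache. It returns (nil, nil) when the source is not a
// tracker, and a SyncInfo when the tracker is missing or the lookup fails.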
func (ctrl *VMBackupController) getBackupTracker(backup *backupv1.VirtualMachineBackup) (*backupv1.VirtualMachineBackupTracker, *SyncInfo) {
if backup.Spec.Source.Kind != backupv1.VirtualMachineBackupTrackerGroupVersionKind.Kind {
return nil, nil
}
objKey := cacheKeyFunc(backup.Namespace, backup.Spec.Source.Name)
obj, exists, err := ctrl.backupTrackerInformer.GetStore().GetByKey(objKey)
if err != nil {
log.Log.With("VirtualMachineBackup", backup.Name).Errorf("Failed to get BackupTracker from store: %v", err)
return nil, syncInfoError(fmt.Errorf("failed to get BackupTracker from store: %w", err))
}
if !exists {
trackerName := backup.Spec.Source.Name
log.Log.With("VirtualMachineBackup", backup.Name).Infof(backupTrackerNotFoundMsg, trackerName)
return nil, &SyncInfo{
event: backupInitializingEvent,
reason: fmt.Sprintf(backupTrackerNotFoundMsg, trackerName),
}
}
tracker, ok := obj.(*backupv1.VirtualMachineBackupTracker)
if !ok {
log.Log.With("VirtualMachineBackup", backup.Name).Errorf("Unexpected object type in BackupTracker store: %T", obj)
return nil, syncInfoError(fmt.Errorf("unexpected object type in BackupTracker store: %T", obj))
}
return tracker, nil
}
func (ctrl *VMBackupController) getVMI(namespace, sourceName string) (*v1.VirtualMachineInstance, bool, error) {
objKey := cacheKeyFunc(namespace, sourceName)
obj, exists, err := ctrl.vmiStore.GetByKey(objKey)
if err != nil {
return nil, false, err
}
if !exists {
return nil, false, nil
}
return obj.(*v1.VirtualMachineInstance), exists, nil
}
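// verifyBackupSource verifies that the source VM exists, that its VMI is
// running with at least one CBT-eligible volume, and that changed block
// tracking is enabled before a backup may start.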
func (ctrl *VMBackupController) verifyBackupSource(backup *backupv1.VirtualMachineBackup, sourceName string) (*v1.VirtualMachineInstance, *SyncInfo) {
objKey := cacheKeyFunc(backup.Namespace, sourceName)
_, exists, err := ctrl.vmStore.GetByKey(objKey)
if err != nil {
err = fmt.Errorf("failed to get VM from store: %w", err)
log.Log.With("VirtualMachineBackup", backup.Name).Error(err.Error())
return nil, syncInfoError(err)
}
if !exists {
return nil, &SyncInfo{
event: backupInitializingEvent,
reason: fmt.Sprintf(vmNotFoundMsg, backup.Namespace, sourceName),
}
}
vmi, exists, err := ctrl.getVMI(backup.Namespace, sourceName)
if err != nil {
err = fmt.Errorf("failed to get VMI from store: %w", err)
log.Log.With("VirtualMachineBackup", backup.Name).Error(err.Error())
return nil, syncInfoError(err)
}
if !exists {
return nil, &SyncInfo{
event: backupInitializingEvent,
reason: fmt.Sprintf(vmNotRunningMsg, sourceName),
}
}
hasEligibleVolumes := false
for _, volume := range vmi.Spec.Volumes {
if IsCBTEligibleVolume(&volume) {
hasEligibleVolumes = true
break
}
}
if !hasEligibleVolumes {
return nil, &SyncInfo{
event: backupInitializingEvent,
reason: fmt.Sprintf(vmNoVolumesToBackupMsg, sourceName),
}
}
if vmi.Status.ChangedBlockTracking == nil || vmi.Status.ChangedBlockTracking.State != v1.ChangedBlockTrackingEnabled {
log.Log.With("VirtualMachineBackup", backup.Name).Errorf(vmNoChangedBlockTrackingMsg, sourceName)
return nil, &SyncInfo{
event: backupInitializingEvent,
reason: fmt.Sprintf(vmNoChangedBlockTrackingMsg, sourceName),
}
}
return vmi, nil
}
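// removeSourceBackupInProgress clears the backup status from the source VMI's
// changedBlockTracking status once the backup no longer needs it.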
func (ctrl *VMBackupController) removeSourceBackupInProgress(vmi *v1.VirtualMachineInstance) *SyncInfo {
if !hasVMIBackupStatus(vmi) {
return nil
}
patchBytes, err := patch.New(
patch.WithRemove("/status/changedBlockTracking/backupStatus"),
).GeneratePayload()
if err != nil {
return syncInfoError(err)
}
_, err = ctrl.client.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, k8stypes.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
err = fmt.Errorf("failed to remove BackupInProgress from VMI %s/%s: %w", vmi.Namespace, vmi.Name, err)
log.Log.Error(err.Error())
return syncInfoError(err)
}
return nil
}
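// updateSourceBackupInProgress marks the source VMI with the name of the
// backup in progress, rejecting the request if a different backup already
// owns the VMI.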
func (ctrl *VMBackupController) updateSourceBackupInProgress(vmi *v1.VirtualMachineInstance, backupName string) error {
if hasVMIBackupStatus(vmi) {
if vmi.Status.ChangedBlockTracking.BackupStatus.BackupName != backupName {
return fmt.Errorf("another backup %s is already in progress, cannot start backup %s",
vmi.Status.ChangedBlockTracking.BackupStatus.BackupName, backupName)
}
return nil
}
backupStatus := &v1.VirtualMachineInstanceBackupStatus{
BackupName: backupName,
}
patchSet := patch.New(
patch.WithTest("/status/changedBlockTracking/backupStatus", vmi.Status.ChangedBlockTracking.BackupStatus),
)
if vmi.Status.ChangedBlockTracking.BackupStatus == nil {
patchSet.AddOption(patch.WithAdd("/status/changedBlockTracking/backupStatus", backupStatus))
} else {
patchSet.AddOption(patch.WithReplace("/status/changedBlockTracking/backupStatus", backupStatus))
}
patchBytes, err := patchSet.GeneratePayload()
if err != nil {
return err
}
_, err = ctrl.client.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, k8stypes.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
log.Log.Errorf("Failed to update source backup in progress: %s", err)
return err
}
return nil
}
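// checkBackupCompletion inspects the VMI backup status, updates the
// BackupTracker checkpoint when applicable, performs cleanup and translates
// the outcome into a completion SyncInfo.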
func (ctrl *VMBackupController) checkBackupCompletion(backup *backupv1.VirtualMachineBackup, vmi *v1.VirtualMachineInstance, backupTracker *backupv1.VirtualMachineBackupTracker) *SyncInfo {
// If VMI backup status is missing, perform cleanup
if !hasVMIBackupStatus(vmi) {
_, syncInfo := ctrl.cleanup(backup, vmi)
return syncInfo
}
backupStatus := vmi.Status.ChangedBlockTracking.BackupStatus
if !backupStatus.Completed {
return nil
}
// Update BackupTracker with the new checkpoint if applicable
if backupTracker != nil && backupStatus.CheckpointName != nil {
if err := ctrl.updateBackupTracker(backup.Namespace, backupTracker, backupStatus); err != nil {
log.Log.Object(backup).Reason(err).Error("Failed to update BackupTracker")
return syncInfoError(err)
}
}
log.Log.Object(backup).Info("Backup completed, performing cleanup")
done, syncInfo := ctrl.cleanup(backup, vmi)
if syncInfo != nil {
return syncInfo
}
if !done {
return nil
}
// TODO: Handle backup failure (backupStatus.Failed) and abort status (backupStatus.AbortStatus)
// Check if backup completed with a warning message
if backupStatus.BackupMsg != nil {
log.Log.Object(backup).Infof(backupCompletedWithWarningMsg, *backupStatus.BackupMsg)
syncInfo = &SyncInfo{
event: backupCompletedWithWarningEvent,
reason: fmt.Sprintf(backupCompletedWithWarningMsg, *backupStatus.BackupMsg),
}
} else {
log.Log.Object(backup).Info("Backup completed successfully")
syncInfo = &SyncInfo{
event: backupCompletedEvent,
reason: backupCompleted,
}
}
// We allow tracking checkpoints only if BackupTracker is specified
if backupTracker != nil && backupStatus.CheckpointName != nil {
syncInfo.checkpointName = *backupStatus.CheckpointName
}
return syncInfo
}
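// updateBackupTracker patches the tracker's status subresource with the
// latest checkpoint reported by the completed backup.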
func (ctrl *VMBackupController) updateBackupTracker(namespace string, tracker *backupv1.VirtualMachineBackupTracker, backupStatus *v1.VirtualMachineInstanceBackupStatus) error {
if tracker == nil {
return nil
}
newCheckpoint := backupv1.BackupCheckpoint{
Name: *backupStatus.CheckpointName,
CreationTime: pointer.P(metav1.Now()),
}
newStatus := &backupv1.VirtualMachineBackupTrackerStatus{
LatestCheckpoint: &newCheckpoint,
}
patchSet := patch.New()
if tracker.Status == nil || tracker.Status.LatestCheckpoint == nil || tracker.Status.LatestCheckpoint.Name == "" {
patchSet.AddOption(patch.WithAdd("/status", newStatus))
} else {
patchSet.AddOption(patch.WithReplace("/status/latestCheckpoint", &newCheckpoint))
}
patchBytes, err := patchSet.GeneratePayload()
if err != nil {
return fmt.Errorf("failed to generate patch payload: %w", err)
}
_, err = ctrl.client.VirtualMachineBackupTracker(namespace).Patch(
context.Background(),
tracker.Name,
k8stypes.JSONPatchType,
patchBytes,
metav1.PatchOptions{},
"status",
)
if err != nil {
return fmt.Errorf("failed to patch BackupTracker status: %w", err)
}
log.Log.Infof("Successfully updated BackupTracker %s/%s with checkpoint %s",
namespace, tracker.Name, newCheckpoint.Name)
log.Log.V(3).Infof("Checkpoint details: name=%s, creationTime=%s",
newCheckpoint.Name, newCheckpoint.CreationTime)
return nil
}
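// deletionCleanup detaches backup resources from the source VMI when a backup
// is deleted, waiting for any in-flight backup on the VMI to complete first.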
func (ctrl *VMBackupController) deletionCleanup(backup *backupv1.VirtualMachineBackup, sourceName string) *SyncInfo {
vmi, _, err := ctrl.getVMI(backup.Namespace, sourceName)
if err != nil {
err = fmt.Errorf("failed to get VMI during deletion cleanup: %w", err)
log.Log.With("VirtualMachineBackup", backup.Name).Error(err.Error())
return syncInfoError(err)
}
vmiBackupInProgress := hasVMIBackupStatus(vmi) &&
vmi.Status.ChangedBlockTracking.BackupStatus.BackupName == backup.Name &&
!vmi.Status.ChangedBlockTracking.BackupStatus.Completed
if vmiBackupInProgress {
log.Log.With("VirtualMachineBackup", backup.Name).V(3).Info(backupDeletingBeforeVMICompletionMsg)
// TODO: abort running backup on deletion instead of waiting for completion
return nil
}
done, syncInfo := ctrl.cleanup(backup, vmi)
if syncInfo != nil {
return syncInfo
}
if !done {
return syncInfoError(fmt.Errorf("cleanup not yet complete for deleted backup"))
}
return nil
}
func isPushMode(backup *backupv1.VirtualMachineBackup) bool {
return backup.Spec.Mode == nil || *backup.Spec.Mode == backupv1.PushMode
}
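// cleanup detaches the backup target PVC (in push mode), clears the VMI
// backup status and, for deleted backups, removes the finalizer. It returns
// true only once all cleanup steps have completed.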
func (ctrl *VMBackupController) cleanup(backup *backupv1.VirtualMachineBackup, vmi *v1.VirtualMachineInstance) (bool, *SyncInfo) {
if isPushMode(backup) {
volumeName := backupTargetVolumeName(backup.Name)
detached := ctrl.backupTargetPVCDetached(vmi, volumeName)
if !detached {
return false, ctrl.detachBackupTargetPVC(vmi, volumeName)
}
}
syncInfo := ctrl.removeSourceBackupInProgress(vmi)
if syncInfo != nil {
return false, syncInfo
}
if isBackupDeleting(backup) {
if syncInfo := ctrl.removeBackupFinalizer(backup); syncInfo != nil {
return false, syncInfo
}
}
return true, nil
}
func isBackupInitializing(status *backupv1.VirtualMachineBackupStatus) bool {
return status == nil || hasCondition(status.Conditions, backupv1.ConditionInitializing)
}
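// IsBackupDone reports whether the backup has a Done condition with status True.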
func IsBackupDone(status *backupv1.VirtualMachineBackupStatus) bool {
return status != nil && hasCondition(status.Conditions, backupv1.ConditionDone)
}
func updateCondition(conditions []backupv1.Condition, c backupv1.Condition) []backupv1.Condition {
found := false
for i := range conditions {
if conditions[i].Type == c.Type {
if conditions[i].Status != c.Status || conditions[i].Reason != c.Reason || conditions[i].Message != c.Message {
conditions[i] = c
}
found = true
break
}
}
if !found {
conditions = append(conditions, c)
}
return conditions
}
func newCondition(condType backupv1.ConditionType, status corev1.ConditionStatus, reason string) backupv1.Condition {
return backupv1.Condition{
Type: condType,
Status: status,
Reason: reason,
LastTransitionTime: metav1.Now(),
}
}
func newInitializingCondition(status corev1.ConditionStatus, reason string) backupv1.Condition {
return newCondition(backupv1.ConditionInitializing, status, reason)
}
func newDoneCondition(status corev1.ConditionStatus, reason string) backupv1.Condition {
return newCondition(backupv1.ConditionDone, status, reason)
}
func newProgressingCondition(status corev1.ConditionStatus, reason string) backupv1.Condition {
return newCondition(backupv1.ConditionProgressing, status, reason)
}
func newDeletingCondition(status corev1.ConditionStatus, reason string) backupv1.Condition {
return newCondition(backupv1.ConditionDeleting, status, reason)
}
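// hasCondition reports whether a condition of the given type is present
// with status True.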
func hasCondition(conditions []backupv1.Condition, condType backupv1.ConditionType) bool {
for _, cond := range conditions {
if cond.Type == condType {
return cond.Status == corev1.ConditionTrue
}
}
return false
}
func updateBackupCondition(b *backupv1.VirtualMachineBackup, c backupv1.Condition) {
b.Status.Conditions = updateCondition(b.Status.Conditions, c)
}
func removeBackupCondition(b *backupv1.VirtualMachineBackup, cType backupv1.ConditionType) {
var conds []backupv1.Condition
for _, c := range b.Status.Conditions {
if c.Type == cType {
continue
}
conds = append(conds, c)
}
b.Status.Conditions = conds
}
func isBackupDeleting(backup *backupv1.VirtualMachineBackup) bool {
return backup != nil && backup.DeletionTimestamp != nil
}
func hasVMIBackupStatus(vmi *v1.VirtualMachineInstance) bool {
return vmi != nil && vmi.Status.ChangedBlockTracking != nil && vmi.Status.ChangedBlockTracking.BackupStatus != nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package cbt
import (
"path/filepath"
k8sv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/util"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
)
var (
CBTKey = "changedBlockTracking"
CBTLabel = map[string]string{CBTKey: "true"}
)
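// CBTState returns the changed block tracking state, or Undefined when the
// status is nil.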
func CBTState(status *v1.ChangedBlockTrackingStatus) v1.ChangedBlockTrackingState {
if status == nil {
return v1.ChangedBlockTrackingUndefined
}
return status.State
}
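// SetCBTState sets the CBT state on the given status, allocating the status
// object when needed.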
func SetCBTState(status **v1.ChangedBlockTrackingStatus, state v1.ChangedBlockTrackingState) {
if status == nil {
return
}
if *status == nil {
*status = &v1.ChangedBlockTrackingStatus{}
}
(*status).State = state
}
func CompareCBTState(status *v1.ChangedBlockTrackingStatus, state v1.ChangedBlockTrackingState) bool {
return CBTState(status) == state
}
func cbtStateUndefined(status *v1.ChangedBlockTrackingStatus) bool {
return status == nil || CompareCBTState(status, v1.ChangedBlockTrackingUndefined)
}
func cbtStateDisabled(status *v1.ChangedBlockTrackingStatus) bool {
return CompareCBTState(status, v1.ChangedBlockTrackingUndefined) ||
CompareCBTState(status, v1.ChangedBlockTrackingDisabled)
}
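// HasCBTStateEnabled reports whether CBT is initializing or enabled.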
func HasCBTStateEnabled(status *v1.ChangedBlockTrackingStatus) bool {
return CompareCBTState(status, v1.ChangedBlockTrackingInitializing) ||
CompareCBTState(status, v1.ChangedBlockTrackingEnabled)
}
// vmMatchesChangedBlockTrackingSelectors checks if a VM should have CBT enabled based on cluster config
func vmMatchesChangedBlockTrackingSelectors(vm *v1.VirtualMachine, clusterConfig *virtconfig.ClusterConfig, nsStore cache.Store) bool {
labelSelectors := clusterConfig.GetConfig().ChangedBlockTrackingLabelSelectors
if labelSelectors == nil {
return false
}
logger := log.Log.Object(vm)
vmSelector := labelSelectors.VirtualMachineLabelSelector
namespaceSelector := labelSelectors.NamespaceLabelSelector
return vmMatchesVMSelector(vmSelector, vm.Labels, logger) ||
vmMatchesNamespaceSelector(namespaceSelector, vm.Namespace, nsStore, logger)
}
func vmMatchesVMSelector(labelSelector *metav1.LabelSelector, vmLabels map[string]string, logger *log.FilteredLogger) bool {
if labelSelector == nil {
return false
}
vmSelector, err := metav1.LabelSelectorAsSelector(labelSelector)
if err != nil {
logger.Reason(err).Warning("invalid changedBlockTracking virtualMachineSelector set, assuming none")
return false
}
return vmSelector.Matches(labels.Set(vmLabels))
}
func vmMatchesNamespaceSelector(labelSelector *metav1.LabelSelector, namespace string, nsStore cache.Store, logger *log.FilteredLogger) bool {
if labelSelector == nil {
return false
}
nsSelector, err := metav1.LabelSelectorAsSelector(labelSelector)
if err != nil {
logger.Reason(err).Warning("invalid changedBlockTracking namespaceSelector set, assuming none")
return false
}
ns := getNamespaceFromStore(namespace, nsStore, logger)
if ns == nil {
return false
}
return nsSelector.Matches(labels.Set(ns.Labels))
}
func getNamespaceFromStore(namespace string, nsStore cache.Store, logger *log.FilteredLogger) *k8sv1.Namespace {
if nsStore == nil {
logger.Warning("namespace informer not available")
return nil
}
obj, exists, err := nsStore.GetByKey(namespace)
if err != nil {
logger.Reason(err).Warning("failed to retrieve namespace from informer")
return nil
}
if !exists {
logger.Warningf("namespace %s not found in informer", namespace)
return nil
}
ns, ok := obj.(*k8sv1.Namespace)
if !ok {
logger.Errorf("failed to cast object to Namespace: %+v", obj)
return nil
}
return ns
}
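// SyncVMChangedBlockTrackingState reconciles the VM's changedBlockTracking
// status from the incremental backup feature gate, the configured CBT
// selectors and the current VMI state.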
func SyncVMChangedBlockTrackingState(vm *v1.VirtualMachine, vmi *v1.VirtualMachineInstance, clusterConfig *virtconfig.ClusterConfig, nsStore cache.Store) {
// If the status is already updated to ChangedBlockTrackingFGDisabled and the FG is still
// disabled, skip.
if CompareCBTState(vm.Status.ChangedBlockTracking, v1.ChangedBlockTrackingFGDisabled) &&
!clusterConfig.IncrementalBackupEnabled() {
return
}
vmMatchesSelector := vmMatchesChangedBlockTrackingSelectors(vm, clusterConfig, nsStore)
if !clusterConfig.IncrementalBackupEnabled() {
handleChangedBlockTrackingFGDisabled(vm, vmi, vmMatchesSelector)
return
}
if vmMatchesSelector {
enableChangedBlockTracking(vm, vmi)
} else {
disableChangedBlockTracking(vm, vmi)
}
}
func enableChangedBlockTracking(vm *v1.VirtualMachine, vmi *v1.VirtualMachineInstance) {
if vmi != nil {
enableChangedBlockTrackingVMIExists(vm, vmi)
} else {
enableChangedBlockTrackingNoVMI(vm)
}
}
// enableChangedBlockTrackingVMIExists manages CBT state when both VM and VMI exist
func enableChangedBlockTrackingVMIExists(vm *v1.VirtualMachine, vmi *v1.VirtualMachineInstance) {
vmState := CBTState(vm.Status.ChangedBlockTracking)
vmiState := CBTState(vmi.Status.ChangedBlockTracking)
switch vmState {
case v1.ChangedBlockTrackingUndefined, v1.ChangedBlockTrackingFGDisabled:
// New CBT request - need restart to enable
SetCBTState(&vm.Status.ChangedBlockTracking, v1.ChangedBlockTrackingPendingRestart)
case v1.ChangedBlockTrackingPendingRestart, v1.ChangedBlockTrackingDisabled:
// VM waiting for restart or disabled - check VMI state
switch vmiState {
case v1.ChangedBlockTrackingInitializing:
SetCBTState(&vm.Status.ChangedBlockTracking, v1.ChangedBlockTrackingInitializing)
case v1.ChangedBlockTrackingEnabled:
SetCBTState(&vm.Status.ChangedBlockTracking, v1.ChangedBlockTrackingEnabled)
default:
SetCBTState(&vm.Status.ChangedBlockTracking, v1.ChangedBlockTrackingPendingRestart)
}
case v1.ChangedBlockTrackingInitializing, v1.ChangedBlockTrackingEnabled:
// VM actively using CBT - sync with VMI state
switch vmiState {
case v1.ChangedBlockTrackingEnabled:
SetCBTState(&vm.Status.ChangedBlockTracking, v1.ChangedBlockTrackingEnabled)
default:
SetCBTState(&vm.Status.ChangedBlockTracking, v1.ChangedBlockTrackingInitializing)
}
default:
resetInvalidState(vm)
}
}
// enableChangedBlockTrackingNoVMI manages CBT state when only VM exists (no VMI)
func enableChangedBlockTrackingNoVMI(vm *v1.VirtualMachine) {
vmState := CBTState(vm.Status.ChangedBlockTracking)
switch vmState {
case v1.ChangedBlockTrackingUndefined,
v1.ChangedBlockTrackingPendingRestart,
v1.ChangedBlockTrackingInitializing,
v1.ChangedBlockTrackingDisabled,
v1.ChangedBlockTrackingFGDisabled:
// VM without VMI - set to initializing
SetCBTState(&vm.Status.ChangedBlockTracking, v1.ChangedBlockTrackingInitializing)
case v1.ChangedBlockTrackingEnabled:
// Keep enabled state when no VMI exists
SetCBTState(&vm.Status.ChangedBlockTracking, v1.ChangedBlockTrackingEnabled)
default:
resetInvalidState(vm)
}
}
// disableChangedBlockTracking handles disabling CBT for VMs that no longer match selectors
func disableChangedBlockTracking(vm *v1.VirtualMachine, vmi *v1.VirtualMachineInstance) {
// No action needed if VM cbtState is already undefined or disabled
if cbtStateDisabled(vm.Status.ChangedBlockTracking) {
return
}
// Disable immediately if no VMI or VMI cbtState is undefined or disabled
if vmi == nil || cbtStateDisabled(vmi.Status.ChangedBlockTracking) {
SetCBTState(&vm.Status.ChangedBlockTracking, v1.ChangedBlockTrackingDisabled)
return
}
// Handle active states that need to transition through restart
switch CBTState(vm.Status.ChangedBlockTracking) {
case v1.ChangedBlockTrackingPendingRestart,
v1.ChangedBlockTrackingInitializing,
v1.ChangedBlockTrackingEnabled,
v1.ChangedBlockTrackingFGDisabled:
SetCBTState(&vm.Status.ChangedBlockTracking, v1.ChangedBlockTrackingPendingRestart)
default:
resetInvalidState(vm)
}
}
func handleChangedBlockTrackingFGDisabled(vm *v1.VirtualMachine, vmi *v1.VirtualMachineInstance, vmMatchesSelector bool) {
if vmi == nil || cbtStateDisabled(vmi.Status.ChangedBlockTracking) {
if vmMatchesSelector {
SetCBTState(&vm.Status.ChangedBlockTracking, v1.ChangedBlockTrackingFGDisabled)
} else {
vm.Status.ChangedBlockTracking = nil
}
return
}
// VMI exists with CBT enabled - need to go through restart first
if HasCBTStateEnabled(vm.Status.ChangedBlockTracking) {
SetCBTState(&vm.Status.ChangedBlockTracking, v1.ChangedBlockTrackingPendingRestart)
}
}
func resetInvalidState(vm *v1.VirtualMachine) {
log.Log.Object(vm).Warningf("invalid changedBlockTracking state %s, resetting to undefined", vm.Status.ChangedBlockTracking.State)
SetCBTState(&vm.Status.ChangedBlockTracking, v1.ChangedBlockTrackingUndefined)
}
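// SetChangedBlockTrackingOnVMI seeds the VMI's changedBlockTracking state,
// when incremental backup is enabled, based on whether the VM matches the
// CBT selectors.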
func SetChangedBlockTrackingOnVMI(vm *v1.VirtualMachine, vmi *v1.VirtualMachineInstance, clusterConfig *virtconfig.ClusterConfig, nsStore cache.Store) {
if !clusterConfig.IncrementalBackupEnabled() {
return
}
vmMatchesSelector := vmMatchesChangedBlockTrackingSelectors(vm, clusterConfig, nsStore)
if vmMatchesSelector {
SetCBTState(&vmi.Status.ChangedBlockTracking, v1.ChangedBlockTrackingInitializing)
} else if !cbtStateUndefined(vm.Status.ChangedBlockTracking) {
SetCBTState(&vmi.Status.ChangedBlockTracking, v1.ChangedBlockTrackingDisabled)
}
}
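// IsCBTEligibleVolume reports whether a volume type supports changed block
// tracking: PVC, DataVolume and HostDisk volumes qualify.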
func IsCBTEligibleVolume(volume *v1.Volume) bool {
return volume.VolumeSource.PersistentVolumeClaim != nil ||
volume.VolumeSource.DataVolume != nil ||
volume.VolumeSource.HostDisk != nil
}
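// SetChangedBlockTrackingOnVMIFromDomain marks CBT as enabled on the VMI once
// every CBT-eligible volume is backed by a domain disk with a dataStore.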
func SetChangedBlockTrackingOnVMIFromDomain(vmi *v1.VirtualMachineInstance, domain *api.Domain) {
if domain == nil || vmi.Status.ChangedBlockTracking == nil || cbtStateDisabled(vmi.Status.ChangedBlockTracking) {
return
}
cbtSet := true
for _, volume := range vmi.Spec.Volumes {
if !IsCBTEligibleVolume(&volume) {
continue
}
found := false
for _, disk := range domain.Spec.Devices.Disks {
if disk.Alias.GetName() == volume.Name {
found = true
if disk.Source.DataStore == nil {
cbtSet = false
}
break
}
}
// If we didn't find a matching disk for an eligible volume, disable CBT
if !found {
cbtSet = false
break
}
}
if cbtSet {
SetCBTState(&vmi.Status.ChangedBlockTracking, v1.ChangedBlockTrackingEnabled)
}
}
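// PathForCBT returns the directory holding CBT overlay files for the VMI,
// which differs for non-root VMIs.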
func PathForCBT(vmi *v1.VirtualMachineInstance) string {
cbtPath := "/var/lib/libvirt/qemu/cbt"
if util.IsNonRootVMI(vmi) {
cbtPath = filepath.Join(util.VirtPrivateDir, "libvirt", "qemu", "cbt")
}
return cbtPath
}
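// GetQCOW2OverlayPath returns the path of the qcow2 CBT overlay file for the
// given volume, i.e. <cbt dir>/<volumeName>.qcow2.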
func GetQCOW2OverlayPath(vmi *v1.VirtualMachineInstance, volumeName string) string {
cbtPath := PathForCBT(vmi)
return filepath.Join(cbtPath, volumeName+".qcow2")
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package cbt
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/pointer"
"kubevirt.io/kubevirt/pkg/storage/types"
)
const (
backupTargetPVCPrefix = "backup-target-pvc"
)
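// backupTargetVolumeName derives the utility volume name for a backup's
// target PVC, e.g. "my-backup-backup-target-pvc" for a backup named "my-backup".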
func backupTargetVolumeName(backupName string) string {
return fmt.Sprintf("%s-%s", backupName, backupTargetPVCPrefix)
}
var (
failedTargetPVCAttach = "failed to attach backup target PVC: %s"
failedTargetPVCDetach = "failed to detach backup target PVC: %s"
attachTargetPVCMsg = "attaching backup target PVC %s to VMI %s"
attachInProgressMsg = "backup target PVC %s is being attached to VMI %s"
detachTargetPVCMsg = "detaching backup target PVC from VMI %s"
backupTargetPVCBlockModeMsg = "backup target PVC must be a filesystem PVC, provided PVC %s/%s is block"
pvcNotFoundMsg = "PVC %s/%s doesn't exist"
backupTargetPVCNameNilMsg = "backup target PVC name is nil"
)
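// verifyBackupTargetPVC checks that the backup target PVC is named, exists in
// the cache and uses filesystem volume mode.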
func (ctrl *VMBackupController) verifyBackupTargetPVC(pvcName *string, namespace string) *SyncInfo {
if pvcName == nil {
log.Log.Error(backupTargetPVCNameNilMsg)
return syncInfoError(fmt.Errorf("%s", backupTargetPVCNameNilMsg))
}
objKey := cacheKeyFunc(namespace, *pvcName)
obj, exists, err := ctrl.pvcStore.GetByKey(objKey)
if err != nil {
err = fmt.Errorf("error getting PVC from store: %w", err)
log.Log.Error(err.Error())
return syncInfoError(err)
}
if !exists {
return &SyncInfo{
event: backupInitializingEvent,
reason: fmt.Sprintf(pvcNotFoundMsg, namespace, *pvcName),
}
}
pvc := obj.(*corev1.PersistentVolumeClaim)
if types.IsPVCBlock(pvc.Spec.VolumeMode) {
return syncInfoError(fmt.Errorf(backupTargetPVCBlockModeMsg, namespace, *pvcName))
}
return nil
}
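// backupTargetPVCAttached reports whether the backup target hotplug volume is
// mounted on the VMI according to its volume status.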
func (ctrl *VMBackupController) backupTargetPVCAttached(vmi *v1.VirtualMachineInstance, volumeName string) bool {
if vmi == nil {
return false
}
for _, volumeStatus := range vmi.Status.VolumeStatus {
if volumeStatus.Name == volumeName {
return volumeStatus.HotplugVolume != nil && volumeStatus.Phase == v1.HotplugVolumeMounted
}
}
return false
}
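// backupTargetPVCDetached reports whether the backup target volume is gone
// from both the VMI spec and its volume status.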
func (ctrl *VMBackupController) backupTargetPVCDetached(vmi *v1.VirtualMachineInstance, volumeName string) bool {
if vmi == nil {
return true
}
for _, vol := range vmi.Spec.UtilityVolumes {
if vol.Name == volumeName {
return false
}
}
for _, volumeStatus := range vmi.Status.VolumeStatus {
if volumeStatus.Name == volumeName {
return false
}
}
return true
}
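// attachBackupTargetPVC adds the backup target PVC to the VMI's utility
// volumes via a test-and-set JSON patch, triggering the hotplug attachment.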
func (ctrl *VMBackupController) attachBackupTargetPVC(vmi *v1.VirtualMachineInstance, pvcName string, volumeName string) *SyncInfo {
// Check if we already patched the VMI with the utilityVolume
for _, vol := range vmi.Spec.UtilityVolumes {
if vol.Name == volumeName {
return &SyncInfo{
event: backupInitializingEvent,
reason: fmt.Sprintf(attachInProgressMsg, pvcName, vmi.Name),
}
}
}
backupVolume := v1.UtilityVolume{
Name: volumeName,
PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{
ClaimName: pvcName,
},
Type: pointer.P(v1.Backup),
}
patchSet := patch.New(
patch.WithTest("/spec/utilityVolumes", vmi.Spec.UtilityVolumes),
)
newUtilityVolumes := append(vmi.Spec.UtilityVolumes, backupVolume)
if len(vmi.Spec.UtilityVolumes) > 0 {
patchSet.AddOption(patch.WithReplace("/spec/utilityVolumes", newUtilityVolumes))
} else {
patchSet.AddOption(patch.WithAdd("/spec/utilityVolumes", newUtilityVolumes))
}
patchBytes, err := patchSet.GeneratePayload()
if err != nil {
err = fmt.Errorf("failed to generate attach backup target PVC patch: %w", err)
log.Log.Error(err.Error())
return syncInfoError(err)
}
_, err = ctrl.client.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, k8stypes.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
failedPatchErr := fmt.Errorf(failedTargetPVCAttach, err)
log.Log.Object(vmi).Errorf("%s", failedPatchErr.Error())
return syncInfoError(failedPatchErr)
}
pvcAttachMsg := fmt.Sprintf(attachTargetPVCMsg, pvcName, vmi.Name)
log.Log.Object(vmi).Infof("%s", pvcAttachMsg)
return &SyncInfo{
event: backupInitializingEvent,
reason: pvcAttachMsg,
}
}
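// detachBackupTargetPVC removes the backup target volume from the VMI's
// utility volumes, removing the whole list when it would become empty.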
func (ctrl *VMBackupController) detachBackupTargetPVC(vmi *v1.VirtualMachineInstance, volumeName string) *SyncInfo {
if len(vmi.Spec.UtilityVolumes) == 0 {
return nil
}
newUtilityVolumes := make([]v1.UtilityVolume, 0, len(vmi.Spec.UtilityVolumes))
for _, vol := range vmi.Spec.UtilityVolumes {
if vol.Name != volumeName {
newUtilityVolumes = append(newUtilityVolumes, vol)
}
}
patchSet := patch.New(
patch.WithTest("/spec/utilityVolumes", vmi.Spec.UtilityVolumes),
)
if len(newUtilityVolumes) == 0 {
patchSet.AddOption(patch.WithRemove("/spec/utilityVolumes"))
} else {
patchSet.AddOption(patch.WithReplace("/spec/utilityVolumes", newUtilityVolumes))
}
patchBytes, err := patchSet.GeneratePayload()
if err != nil {
failedPatchErr := fmt.Errorf(failedTargetPVCDetach, err)
log.Log.Object(vmi).Errorf("Failed to generate patch: %s", failedPatchErr.Error())
return syncInfoError(failedPatchErr)
}
_, err = ctrl.client.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, k8stypes.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
failedPatchErr := fmt.Errorf(failedTargetPVCDetach, err)
log.Log.Object(vmi).Errorf("Failed to patch VMI: %s", failedPatchErr.Error())
return syncInfoError(failedPatchErr)
}
pvcDetachMsg := fmt.Sprintf(detachTargetPVCMsg, vmi.Name)
log.Log.Object(vmi).Infof("%s", pvcDetachMsg)
return &SyncInfo{
event: backupInitiatedEvent,
reason: pvcDetachMsg,
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package hotplug
import (
"context"
"k8s.io/apimachinery/pkg/api/equality"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
storagetypes "kubevirt.io/kubevirt/pkg/storage/types"
)
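// HandleDeclarativeVolumes syncs declarative hotplug volumes from the VM spec
// to a running VMI, unless volume updates are handled by migration.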
func HandleDeclarativeVolumes(client kubecli.KubevirtClient, vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) error {
if vm.Spec.UpdateVolumesStrategy != nil && *vm.Spec.UpdateVolumesStrategy == virtv1.UpdateVolumesStrategyMigration {
// Are there cases in which we could still proceed?
return nil
}
if err := patchHotplugVolumes(client, vm, vmi); err != nil {
log.Log.Object(vm).Errorf("failed to update hotplug volumes for vmi:%v", err)
return err
}
return nil
}
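// patchHotplugVolumes computes the desired VMI volumes and disks from the VM
// spec and applies them to the running VMI with a single JSON patch.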
func patchHotplugVolumes(client kubecli.KubevirtClient, vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) error {
if vmi == nil || !vmi.IsRunning() {
return nil
}
newVmiVolumes := append(filterHotplugVMIVolumes(vm, vmi), getNewHotplugVMVolumes(vm, vmi)...)
newVmiDisks := append(filterHotplugVMIDisks(vm, vmi, newVmiVolumes), getNewHotplugVMDisks(vm, vmi, newVmiVolumes)...)
if equality.Semantic.DeepEqual(vmi.Spec.Volumes, newVmiVolumes) &&
equality.Semantic.DeepEqual(vmi.Spec.Domain.Devices.Disks, newVmiDisks) {
log.Log.Object(vm).V(3).Info("No hotplug volumes to patch")
return nil
}
patchSet := patch.New(
patch.WithTest("/spec/volumes", vmi.Spec.Volumes),
patch.WithTest("/spec/domain/devices/disks", vmi.Spec.Domain.Devices.Disks),
)
if len(vmi.Spec.Volumes) > 0 {
patchSet.AddOption(patch.WithReplace("/spec/volumes", newVmiVolumes))
} else {
patchSet.AddOption(patch.WithAdd("/spec/volumes", newVmiVolumes))
}
if len(vmi.Spec.Domain.Devices.Disks) > 0 {
patchSet.AddOption(patch.WithReplace("/spec/domain/devices/disks", newVmiDisks))
} else {
patchSet.AddOption(patch.WithAdd("/spec/domain/devices/disks", newVmiDisks))
}
patchBytes, err := patchSet.GeneratePayload()
if err != nil {
return err
}
_, err = client.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType, patchBytes, v1.PatchOptions{})
if err != nil {
return err
}
return nil
}
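// filterHotplugVMIVolumes returns the VMI volumes minus any declarative
// hotplug volumes that were removed from or changed in the VM spec.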
func filterHotplugVMIVolumes(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) []virtv1.Volume {
var volumes []virtv1.Volume
vmVolumesByName := storagetypes.GetVolumesByName(&vm.Spec.Template.Spec)
// remove any volumes missing/changed in the VM spec
for _, vmiVolume := range vmi.Spec.Volumes {
if storagetypes.IsDeclarativeHotplugVolume(&vmiVolume) {
vmVolume, exists := vmVolumesByName[vmiVolume.Name]
if !exists {
// volume not in VM spec, remove it
log.Log.Object(vm).Infof("Removing hotplug volume %s from VMI, no longer in VM", vmiVolume.Name)
continue
}
// volume changed in VM spec - remove it to be re-added with new values later
if storagetypes.IsDeclarativeHotplugVolume(vmVolume) && !equality.Semantic.DeepEqual(vmVolume, &vmiVolume) {
log.Log.Object(vm).Infof("Removing hotplug volume %s from VMI, volume changed", vmiVolume.Name)
continue
}
}
volumes = append(volumes, *vmiVolume.DeepCopy())
}
return volumes
}
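// getNewHotplugVMVolumes returns the declarative hotplug volumes present in
// the VM spec but not yet on the VMI and not currently being unplugged.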
func getNewHotplugVMVolumes(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) []virtv1.Volume {
var volumes []virtv1.Volume
vmiVolumesByName := storagetypes.GetVolumesByName(&vmi.Spec)
var volumesWithStatus = make(map[string]struct{})
for _, vs := range vmi.Status.VolumeStatus {
volumesWithStatus[vs.Name] = struct{}{}
}
for _, vmVolume := range vm.Spec.Template.Spec.Volumes {
if storagetypes.IsDeclarativeHotplugVolume(&vmVolume) {
_, vmiVolumeExists := vmiVolumesByName[vmVolume.Name]
// the VMI keeps reporting status for a volume after it is removed
// from the spec while the volume is being hot-unplugged
_, vmiVolumeHasStatus := volumesWithStatus[vmVolume.Name]
if !vmiVolumeExists && !vmiVolumeHasStatus {
log.Log.Object(vm).Infof("Adding hotplug volume %s to VMI", vmVolume.Name)
volumes = append(volumes, *vmVolume.DeepCopy())
}
}
}
return volumes
}
func volumesByName(volumes []virtv1.Volume) map[string]*virtv1.Volume {
volumeMap := make(map[string]*virtv1.Volume)
for _, v := range volumes {
volumeMap[v.Name] = v.DeepCopy()
}
return volumeMap
}
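// filterHotplugVMIDisks returns the VMI disks minus those whose backing
// hotplug volume was removed or changed, keeping CD-ROM drives in place.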
func filterHotplugVMIDisks(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance, vmiNewVolumes []virtv1.Volume) []virtv1.Disk {
var disks []virtv1.Disk
vmiNewVolumesByName := volumesByName(vmiNewVolumes)
vmDisksByName := storagetypes.GetDisksByName(&vm.Spec.Template.Spec)
vmVolumesByName := storagetypes.GetVolumesByName(&vm.Spec.Template.Spec)
for _, vmiDisk := range vmi.Spec.Domain.Devices.Disks {
_, vmiVolumeExists := vmiNewVolumesByName[vmiDisk.Name]
if !vmiVolumeExists {
vmDisk, vmDiskExists := vmDisksByName[vmiDisk.Name]
_, vmVolumeExists := vmVolumesByName[vmiDisk.Name]
vmiIsCDRom := vmiDisk.CDRom != nil
vmIsCDRom := vmDiskExists && vmDisk.CDRom != nil
// disk and volume are gone
if !vmDiskExists {
continue
}
// volume changed, remove the disk unless both the VM and VMI disks are CD-ROMs
if vmVolumeExists && (!vmIsCDRom || !vmiIsCDRom) {
continue
}
}
disks = append(disks, *vmiDisk.DeepCopy())
}
return disks
}
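// getNewHotplugVMDisks returns the VM disks that back newly added declarative
// hotplug volumes and are not yet present on the VMI.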
func getNewHotplugVMDisks(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance, vmiNewVolumes []virtv1.Volume) []virtv1.Disk {
var disks []virtv1.Disk
vmiNewVolumesByName := volumesByName(vmiNewVolumes)
vmiDisksByName := storagetypes.GetDisksByName(&vmi.Spec)
for _, vmDisk := range vm.Spec.Template.Spec.Domain.Devices.Disks {
vmVolume, vmVolumeExists := vmiNewVolumesByName[vmDisk.Name]
_, vmiDiskExists := vmiDisksByName[vmDisk.Name]
if vmVolumeExists && storagetypes.IsDeclarativeHotplugVolume(vmVolume) && !vmiDiskExists {
log.Log.Object(vm).Infof("Adding hotplug disk %s to VMI", vmDisk.Name)
disks = append(disks, *vmDisk.DeepCopy())
}
}
return disks
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package memorydump
import (
"context"
"fmt"
k8score "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
storagetypes "kubevirt.io/kubevirt/pkg/storage/types"
)
const (
ErrorReason = "MemoryDumpError"
failed = "Memory dump failed"
)
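// HasCompleted reports whether the VM's memory dump request has moved past
// the associating and in-progress phases.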
func HasCompleted(vm *v1.VirtualMachine) bool {
return vm.Status.MemoryDumpRequest != nil && vm.Status.MemoryDumpRequest.Phase != v1.MemoryDumpAssociating && vm.Status.MemoryDumpRequest.Phase != v1.MemoryDumpInProgress
}
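// RemoveMemoryDumpVolumeFromVMISpec drops the memory dump volume matching the
// given claim name from the VMI spec.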
func RemoveMemoryDumpVolumeFromVMISpec(vmiSpec *v1.VirtualMachineInstanceSpec, claimName string) *v1.VirtualMachineInstanceSpec {
newVolumesList := []v1.Volume{}
for _, volume := range vmiSpec.Volumes {
if volume.Name != claimName {
newVolumesList = append(newVolumesList, volume)
}
}
vmiSpec.Volumes = newVolumesList
return vmiSpec
}
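// HandleRequest reconciles the VM's memory dump request: it mounts the memory
// dump PVC while associating, and unmounts and detaches it while unmounting,
// failed or dissociating.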
func HandleRequest(client kubecli.KubevirtClient, vm *v1.VirtualMachine, vmi *v1.VirtualMachineInstance, pvcStore cache.Store) error {
if vm.Status.MemoryDumpRequest == nil {
return nil
}
vmiVolumeMap := make(map[string]v1.Volume)
if vmi != nil {
for _, volume := range vmi.Spec.Volumes {
vmiVolumeMap[volume.Name] = volume
}
}
switch vm.Status.MemoryDumpRequest.Phase {
case v1.MemoryDumpAssociating:
if vmi == nil || vmi.DeletionTimestamp != nil || !vmi.IsRunning() {
return nil
}
// While associating, add the memory dump PVC as a volume on both
// the VM and the VMI to trigger the mount into virt-launcher and
// start the memory dump
vm.Spec.Template.Spec = *applyMemoryDumpVolumeRequestOnVMISpec(&vm.Spec.Template.Spec, vm.Status.MemoryDumpRequest.ClaimName)
if _, exists := vmiVolumeMap[vm.Status.MemoryDumpRequest.ClaimName]; exists {
return nil
}
if err := generateVMIMemoryDumpVolumePatch(client, vmi, vm.Status.MemoryDumpRequest, true); err != nil {
log.Log.Object(vmi).Errorf("unable to patch vmi to add memory dump volume: %v", err)
return err
}
case v1.MemoryDumpUnmounting, v1.MemoryDumpFailed:
if err := patchMemoryDumpPVCAnnotation(client, vm, pvcStore); err != nil {
return err
}
// Check if the memory dump is in the VMI's list of volumes;
// if it is still there, remove it so it unmounts from virt-launcher
if _, exists := vmiVolumeMap[vm.Status.MemoryDumpRequest.ClaimName]; !exists {
return nil
}
if err := generateVMIMemoryDumpVolumePatch(client, vmi, vm.Status.MemoryDumpRequest, false); err != nil {
log.Log.Object(vmi).Errorf("unable to patch vmi to remove memory dump volume: %v", err)
return err
}
case v1.MemoryDumpDissociating:
// Check if the memory dump is in the VMI's list of volumes;
// if it is still there, remove it so it unmounts from virt-launcher
if _, exists := vmiVolumeMap[vm.Status.MemoryDumpRequest.ClaimName]; exists {
if err := generateVMIMemoryDumpVolumePatch(client, vmi, vm.Status.MemoryDumpRequest, false); err != nil {
log.Log.Object(vmi).Errorf("unable to patch vmi to remove memory dump volume: %v", err)
return err
}
}
vm.Spec.Template.Spec = *RemoveMemoryDumpVolumeFromVMISpec(&vm.Spec.Template.Spec, vm.Status.MemoryDumpRequest.ClaimName)
}
return nil
}
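// UpdateRequest advances the memory dump request phase based on the current
// VM volumes and the VMI volume status.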
func UpdateRequest(vm *v1.VirtualMachine, vmi *v1.VirtualMachineInstance) {
if vm.Status.MemoryDumpRequest == nil {
return
}
updatedMemoryDumpReq := vm.Status.MemoryDumpRequest.DeepCopy()
if vm.Status.MemoryDumpRequest.Remove {
updatedMemoryDumpReq.Phase = v1.MemoryDumpDissociating
}
switch vm.Status.MemoryDumpRequest.Phase {
case v1.MemoryDumpCompleted:
// Once the memory dump has completed there is no update needed;
// a new update will come from the subresource API once
// a new request is issued
return
case v1.MemoryDumpAssociating:
// Update Phase to InProgress once the memory dump
// is in the list of VM volumes
for _, volume := range vm.Spec.Template.Spec.Volumes {
if vm.Status.MemoryDumpRequest.ClaimName == volume.Name {
updatedMemoryDumpReq.Phase = v1.MemoryDumpInProgress
break
}
}
case v1.MemoryDumpInProgress:
// Move to unmounting once the VMI volume status reports that
// the dump timestamps have been updated
if vmi != nil && len(vmi.Status.VolumeStatus) > 0 {
for _, volumeStatus := range vmi.Status.VolumeStatus {
if volumeStatus.Name == vm.Status.MemoryDumpRequest.ClaimName &&
volumeStatus.MemoryDumpVolume != nil {
if volumeStatus.MemoryDumpVolume.StartTimestamp != nil {
updatedMemoryDumpReq.StartTimestamp = volumeStatus.MemoryDumpVolume.StartTimestamp
}
if volumeStatus.Phase == v1.MemoryDumpVolumeCompleted {
updatedMemoryDumpReq.Phase = v1.MemoryDumpUnmounting
updatedMemoryDumpReq.EndTimestamp = volumeStatus.MemoryDumpVolume.EndTimestamp
updatedMemoryDumpReq.FileName = &volumeStatus.MemoryDumpVolume.TargetFileName
} else if volumeStatus.Phase == v1.MemoryDumpVolumeFailed {
updatedMemoryDumpReq.Phase = v1.MemoryDumpFailed
updatedMemoryDumpReq.Message = volumeStatus.Message
updatedMemoryDumpReq.EndTimestamp = volumeStatus.MemoryDumpVolume.EndTimestamp
}
}
}
}
case v1.MemoryDumpUnmounting:
// Mark the memory dump as completed once it has been unmounted,
// i.e. it is no longer part of the VMI volume status
if vmi != nil {
for _, volumeStatus := range vmi.Status.VolumeStatus {
// If we found the claim name in the vmi volume status
// then the pvc is still mounted
if volumeStatus.Name == vm.Status.MemoryDumpRequest.ClaimName {
return
}
}
}
updatedMemoryDumpReq.Phase = v1.MemoryDumpCompleted
case v1.MemoryDumpDissociating:
// Make sure the memory dump is not in the vmi list of volumes
if vmi != nil {
for _, volumeStatus := range vmi.Status.VolumeStatus {
if volumeStatus.Name == vm.Status.MemoryDumpRequest.ClaimName {
return
}
}
}
// Make sure the memory dump is not in the list of vm volumes
for _, volume := range vm.Spec.Template.Spec.Volumes {
if vm.Status.MemoryDumpRequest.ClaimName == volume.Name {
return
}
}
// Remove the memory dump request
updatedMemoryDumpReq = nil
}
vm.Status.MemoryDumpRequest = updatedMemoryDumpReq
}
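// generateVMIMemoryDumpVolumePatch adds or removes the memory dump volume on
// the VMI spec through a test-and-set JSON patch.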
func generateVMIMemoryDumpVolumePatch(client kubecli.KubevirtClient, vmi *v1.VirtualMachineInstance, request *v1.VirtualMachineMemoryDumpRequest, addVolume bool) error {
foundRemoveVol := false
for _, volume := range vmi.Spec.Volumes {
if request.ClaimName == volume.Name {
if addVolume {
return fmt.Errorf("Unable to add volume [%s] because it already exists", volume.Name)
} else {
foundRemoveVol = true
}
}
}
if !foundRemoveVol && !addVolume {
return fmt.Errorf("Unable to remove volume [%s] because it does not exist", request.ClaimName)
}
vmiCopy := vmi.DeepCopy()
if addVolume {
vmiCopy.Spec = *applyMemoryDumpVolumeRequestOnVMISpec(&vmiCopy.Spec, request.ClaimName)
} else {
vmiCopy.Spec = *RemoveMemoryDumpVolumeFromVMISpec(&vmiCopy.Spec, request.ClaimName)
}
patchset := patch.New(
patch.WithTest("/spec/volumes", vmi.Spec.Volumes),
)
if len(vmi.Spec.Volumes) > 0 {
patchset.AddOption(patch.WithReplace("/spec/volumes", vmiCopy.Spec.Volumes))
} else {
patchset.AddOption(patch.WithAdd("/spec/volumes", vmiCopy.Spec.Volumes))
}
patchBytes, err := patchset.GeneratePayload()
if err != nil {
return err
}
_, err = client.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
return err
}
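// applyMemoryDumpVolumeRequestOnVMISpec appends a hotpluggable memory dump
// volume for the given claim to the VMI spec, if not already present.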
func applyMemoryDumpVolumeRequestOnVMISpec(vmiSpec *v1.VirtualMachineInstanceSpec, claimName string) *v1.VirtualMachineInstanceSpec {
for _, volume := range vmiSpec.Volumes {
if volume.Name == claimName {
return vmiSpec
}
}
memoryDumpVol := &v1.MemoryDumpVolumeSource{
PersistentVolumeClaimVolumeSource: v1.PersistentVolumeClaimVolumeSource{
PersistentVolumeClaimVolumeSource: k8score.PersistentVolumeClaimVolumeSource{
ClaimName: claimName,
},
Hotpluggable: true,
},
}
newVolume := v1.Volume{
Name: claimName,
}
newVolume.VolumeSource.MemoryDump = memoryDumpVol
vmiSpec.Volumes = append(vmiSpec.Volumes, newVolume)
return vmiSpec
}
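// patchMemoryDumpPVCAnnotation records the memory dump outcome (target file
// name or failure) as an annotation on the memory dump PVC.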
func patchMemoryDumpPVCAnnotation(client kubecli.KubevirtClient, vm *v1.VirtualMachine, pvcStore cache.Store) error {
request := vm.Status.MemoryDumpRequest
pvc, err := storagetypes.GetPersistentVolumeClaimFromCache(vm.Namespace, request.ClaimName, pvcStore)
if err != nil {
log.Log.Object(vm).Errorf("Error getting PersistentVolumeClaim to update memory dump annotation: %v", err)
return err
}
if pvc == nil {
log.Log.Object(vm).Errorf("PersistentVolumeClaim %s not found when trying to update memory dump annotation", request.ClaimName)
return fmt.Errorf("error when trying to update memory dump annotation, pvc %s not found", request.ClaimName)
}
var patchVal string
switch request.Phase {
case v1.MemoryDumpUnmounting:
// skip patching pvc annotation if file name
// is empty
if request.FileName == nil {
return nil
}
patchVal = *request.FileName
case v1.MemoryDumpFailed:
patchVal = failed
default:
log.Log.Object(vm).Errorf("Unexpected phase when patching memory dump pvc annotation")
return nil
}
annoPatch := patch.New()
if len(pvc.Annotations) == 0 {
annoPatch.AddOption(patch.WithAdd("/metadata/annotations", map[string]string{v1.PVCMemoryDumpAnnotation: patchVal}))
} else if ann, ok := pvc.Annotations[v1.PVCMemoryDumpAnnotation]; ok && ann == patchVal {
return nil
} else {
annoPatch.AddOption(patch.WithReplace("/metadata/annotations/"+patch.EscapeJSONPointer(v1.PVCMemoryDumpAnnotation), patchVal))
}
annoPatchPayload, err := annoPatch.GeneratePayload()
if err != nil {
return err
}
_, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Patch(context.Background(), pvc.Name, types.JSONPatchType, annoPatchPayload, metav1.PatchOptions{})
if err != nil {
log.Log.Object(vm).Errorf("failed to annotate memory dump PVC %s/%s, error: %s", pvc.Namespace, pvc.Name, err)
}
return nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package reservation
import (
"path/filepath"
v1 "kubevirt.io/api/core/v1"
)
const (
sourceDaemonsPath = "/var/run/kubevirt/daemons"
hostSourceDaemonsPath = "/proc/1/root" + sourceDaemonsPath
prHelperDir = "pr"
prHelperSocket = "pr-helper.sock"
prResourceName = "pr-helper"
)
func GetPrResourceName() string {
return prResourceName
}
func GetPrHelperSocketDir() string {
return filepath.Join(sourceDaemonsPath, prHelperDir)
}
func GetPrHelperHostSocketDir() string {
return filepath.Join(hostSourceDaemonsPath, prHelperDir)
}
func GetPrHelperSocketPath() string {
return filepath.Join(GetPrHelperSocketDir(), prHelperSocket)
}
func GetPrHelperSocket() string {
return prHelperSocket
}
func HasVMIPersistentReservation(vmi *v1.VirtualMachineInstance) bool {
return HasVMISpecPersistentReservation(&vmi.Spec)
}
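// HasVMISpecPersistentReservation reports whether any LUN disk in the spec
// requests a SCSI persistent reservation.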
func HasVMISpecPersistentReservation(vmiSpec *v1.VirtualMachineInstanceSpec) bool {
for _, disk := range vmiSpec.Domain.Devices.Disks {
if disk.DiskDevice.LUN != nil && disk.DiskDevice.LUN.Reservation {
return true
}
}
return false
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package snapshot
import (
"context"
"encoding/json"
"fmt"
"maps"
"strings"
"time"
jsonpatch "github.com/evanphx/json-patch"
vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
"github.com/openshift/library-go/pkg/build/naming"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/equality"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
validation "k8s.io/apimachinery/pkg/util/validation"
kubevirtv1 "kubevirt.io/api/core/v1"
snapshotv1 "kubevirt.io/api/snapshot/v1beta1"
"kubevirt.io/client-go/log"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/controller"
"kubevirt.io/kubevirt/pkg/instancetype/revision"
"kubevirt.io/kubevirt/pkg/pointer"
backendstorage "kubevirt.io/kubevirt/pkg/storage/backend-storage"
typesutil "kubevirt.io/kubevirt/pkg/storage/types"
storageutils "kubevirt.io/kubevirt/pkg/storage/utils"
firmware "kubevirt.io/kubevirt/pkg/virt-controller/watch/vm"
)
const (
RestoreNameAnnotation = "restore.kubevirt.io/name"
vmRestoreFinalizer = "snapshot.kubevirt.io/vmrestore-protection"
populatedForPVCAnnotation = "cdi.kubevirt.io/storage.populatedFor"
lastRestoreAnnotation = "restore.kubevirt.io/lastRestoreUID"
restoreSourceNameLabel = "restore.kubevirt.io/source-vm-name"
restoreSourceNamespaceLabel = "restore.kubevirt.io/source-vm-namespace"
restoreCleanupBackendPVCLabel = "restore.kubevirt.io/cleanup-backend-pvc"
restoreCompleteEvent = "VirtualMachineRestoreComplete"
restoreErrorEvent = "VirtualMachineRestoreError"
restoreVMNotReadyEvent = "RestoreTargetNotReady"
restoreDataVolumeCreateErrorEvent = "RestoreDataVolumeCreateError"
restoreOwnedByVMLabel = "restore.kubevirt.io/owned-by-vm"
defaultPvcRestorePrefix = "restore"
waitEventuallyMessage = "Waiting for target VM to be powered off. Please stop the restore target to proceed with restore"
stopTargetMessage = "Automatically stopping restore target for restore operation"
vmiExistsEventMessage = "Restore target VMI still exists, please stop the restore target to proceed with restore"
targetNotReadyFailureMessage = "Restore target VMI must be powered off before restore operation"
restoreFailedEvent = "Operation failed"
errorRestoreToExistingTarget = "restore source and restore target are different but restore target already exists"
)
var (
restoreGracePeriodExceededError = fmt.Sprintf("Restore target failed to be ready within %s. Please power off the target VM before attempting restore", snapshotv1.DefaultGracePeriod)
waitGracePeriodMessage = fmt.Sprintf("Waiting for target VM to be powered off. Please stop the restore target to proceed with restore, or the operation will fail after %s", snapshotv1.DefaultGracePeriod)
)
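// restoreTarget abstracts the object being restored (currently a
// VirtualMachine) so the controller can stop, reconcile and own it.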
type restoreTarget interface {
Stop() error
Ready() (bool, error)
Reconcile() (bool, error)
Own(obj metav1.Object)
UpdateDoneRestore() error
UpdateRestoreInProgress() error
UpdateTarget(obj metav1.Object)
Exists() bool
UID() types.UID
VirtualMachine() *kubevirtv1.VirtualMachine
TargetRestored() bool
}
type vmRestoreTarget struct {
controller *VMRestoreController
vmRestore *snapshotv1.VirtualMachineRestore
vm *kubevirtv1.VirtualMachine
}
var restoreAnnotationsToDelete = []string{
"pv.kubernetes.io",
"volume.beta.kubernetes.io",
"cdi.kubevirt.io",
"volume.kubernetes.io",
"k8s.io/CloneRequest",
"k8s.io/CloneOf",
}
// getRestoreNameOverride returns the overridden name for a volume restore
func getRestoreNameOverride(vmRestore *snapshotv1.VirtualMachineRestore, volumeName string) string {
for _, override := range vmRestore.Spec.VolumeRestoreOverrides {
// User has specified their own destination restore name, use it
if override.VolumeName == volumeName && override.RestoreName != "" {
return override.RestoreName
}
}
return ""
}
// restoreVolumeName computes the name of the restored volume for a given volume within a backup
// volumeName is the original name of the volume being restored
// claimName is the name of the original claim for that same volume (a PVC or a DataVolume)
func restoreVolumeName(vmRestore *snapshotv1.VirtualMachineRestore, volumeName, claimName string) string {
// Check if the user is overriding the restore name
if restoreOverride := getRestoreNameOverride(vmRestore, volumeName); restoreOverride != "" {
return restoreOverride
}
// If the policy is to overwrite the volume, we must return the same claim name as the source
if isVolumeRestorePolicyInPlace(vmRestore) {
return claimName
}
// Auto-compute the name of the restored volume from the VMRestore UID and the original volume name
return fmt.Sprintf("%s-%s-%s", defaultPvcRestorePrefix, vmRestore.UID, volumeName)
}
// restorePVCName computes the name of the restored PVC for a given volume within a backup
// volumeName is the name of the volume being restored
// pvcName is the name of the original PVC for that same volume
func restorePVCName(vmRestore *snapshotv1.VirtualMachineRestore, volumeName, pvcName string) string {
return restoreVolumeName(vmRestore, volumeName, pvcName)
}
// restoreDVName computes the name of a restored DataVolume for a given volume within a backup
// volumeName is the name of the volume being restored
// dvName is the name of the dataVolume being restored
func restoreDVName(vmRestore *snapshotv1.VirtualMachineRestore, volumeName, dvName string) string {
return restoreVolumeName(vmRestore, volumeName, dvName)
}
func vmRestoreFailed(vmRestore *snapshotv1.VirtualMachineRestore) bool {
return vmRestore.Status != nil &&
hasConditionType(vmRestore.Status.Conditions, snapshotv1.ConditionFailure)
}
func vmRestoreCompleted(vmRestore *snapshotv1.VirtualMachineRestore) bool {
return vmRestore.Status != nil && vmRestore.Status.Complete != nil && *vmRestore.Status.Complete
}
func VmRestoreProgressing(vmRestore *snapshotv1.VirtualMachineRestore) bool {
return !vmRestoreCompleted(vmRestore) && !vmRestoreFailed(vmRestore)
}
func vmRestoreDeleting(vmRestore *snapshotv1.VirtualMachineRestore) bool {
return vmRestore != nil && vmRestore.DeletionTimestamp != nil
}
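// updateVMRestore is the main reconcile step for a VirtualMachineRestore: it
// initializes status, takes ownership of the restore, waits for the target to
// be ready, restores volumes, reconciles the target and completes the restore.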
func (ctrl *VMRestoreController) updateVMRestore(vmRestoreIn *snapshotv1.VirtualMachineRestore) (time.Duration, error) {
logger := log.Log.Object(vmRestoreIn)
logger.V(1).Infof("Updating VirtualMachineRestore")
vmRestoreOut := vmRestoreIn.DeepCopy()
if vmRestoreOut.Status == nil {
vmRestoreOut.Status = &snapshotv1.VirtualMachineRestoreStatus{
Complete: pointer.P(false),
}
updateRestoreCondition(vmRestoreOut, newProgressingCondition(corev1.ConditionTrue, "Initializing VirtualMachineRestore"))
updateRestoreCondition(vmRestoreOut, newReadyCondition(corev1.ConditionFalse, "Initializing VirtualMachineRestore"))
}
// let's make sure everything is initialized properly before continuing
if !equality.Semantic.DeepEqual(vmRestoreIn.Status, vmRestoreOut.Status) {
return 0, ctrl.doUpdateStatus(vmRestoreIn, vmRestoreOut)
}
target, err := ctrl.getTarget(vmRestoreOut)
if err != nil {
logger.Reason(err).Error("Error getting restore target")
return 0, ctrl.doUpdateError(vmRestoreOut, err)
}
if vmRestoreDeleting(vmRestoreOut) {
return 0, ctrl.handleVMRestoreDeletion(vmRestoreOut, target)
}
if !VmRestoreProgressing(vmRestoreOut) {
return 0, nil
}
if len(vmRestoreOut.OwnerReferences) == 0 {
target.Own(vmRestoreOut)
}
controller.AddFinalizer(vmRestoreOut, vmRestoreFinalizer)
if !equality.Semantic.DeepEqual(vmRestoreIn.ObjectMeta, vmRestoreOut.ObjectMeta) {
vmRestoreOut, err = ctrl.Client.VirtualMachineRestore(vmRestoreOut.Namespace).Update(context.Background(), vmRestoreOut, metav1.UpdateOptions{})
if err != nil {
logger.Reason(err).Error("Error updating owner references")
return 0, err
}
}
ready, err := target.Ready()
if err != nil {
logger.Reason(err).Error("Error checking target ready")
return 0, ctrl.doUpdateError(vmRestoreIn, err)
}
if !ready {
return 0, ctrl.handleVMRestoreTargetNotReady(vmRestoreOut, target)
}
vmSnapshot, err := ctrl.getVMSnapshot(vmRestoreOut)
if err != nil {
return 0, ctrl.doUpdateError(vmRestoreIn, err)
}
// Check whether the target exists before the restore and whether it is
// the same as the source. Restoring into an existing target that is not
// the snapshot source is not allowed.
if target.Exists() && !target.TargetRestored() && sourceAndTargetAreDifferent(target, vmSnapshot) {
logger.Error(errorRestoreToExistingTarget)
return 0, ctrl.doUpdateError(vmRestoreIn, fmt.Errorf("%s", errorRestoreToExistingTarget))
}
err = target.UpdateRestoreInProgress()
if err != nil {
return 0, err
}
updated, err := ctrl.reconcileVolumeRestores(vmRestoreOut, target, vmSnapshot)
if err != nil {
logger.Reason(err).Error("Error reconciling VolumeRestores")
return 0, ctrl.doUpdateError(vmRestoreIn, err)
}
if updated {
updateRestoreCondition(vmRestoreOut, newProgressingCondition(corev1.ConditionTrue, "Creating new PVCs"))
updateRestoreCondition(vmRestoreOut, newReadyCondition(corev1.ConditionFalse, "Waiting for new PVCs"))
return 0, ctrl.doUpdateStatus(vmRestoreIn, vmRestoreOut)
}
updated, err = target.Reconcile()
if err != nil {
logger.Reason(err).Error("Error reconciling target")
return 0, ctrl.doUpdateError(vmRestoreIn, err)
}
if updated {
updateRestoreCondition(vmRestoreOut, newProgressingCondition(corev1.ConditionTrue, "Updating target spec"))
updateRestoreCondition(vmRestoreOut, newReadyCondition(corev1.ConditionFalse, "Waiting for target update"))
return 0, ctrl.doUpdateStatus(vmRestoreIn, vmRestoreOut)
}
if err = ctrl.deleteObsoleteVolumes(vmRestoreOut, target); err != nil {
logger.Reason(err).Error("Error cleaning up")
return 0, ctrl.doUpdateError(vmRestoreIn, err)
}
err = target.UpdateDoneRestore()
if err != nil {
logger.Reason(err).Error("Error updating done restore")
return 0, ctrl.doUpdateError(vmRestoreIn, err)
}
ctrl.Recorder.Eventf(
vmRestoreOut,
corev1.EventTypeNormal,
restoreCompleteEvent,
"Successfully completed VirtualMachineRestore %s",
vmRestoreOut.Name,
)
vmRestoreOut.Status.Complete = pointer.P(true)
vmRestoreOut.Status.RestoreTime = currentTime()
updateRestoreCondition(vmRestoreOut, newProgressingCondition(corev1.ConditionFalse, "Operation complete"))
updateRestoreCondition(vmRestoreOut, newReadyCondition(corev1.ConditionTrue, "Operation complete"))
return 0, ctrl.doUpdateStatus(vmRestoreIn, vmRestoreOut)
}
func (ctrl *VMRestoreController) doUpdateError(restore *snapshotv1.VirtualMachineRestore, err error) error {
if updateErr := ctrl.doUpdateErrorWithFailure(restore, err.Error(), false); updateErr != nil {
return updateErr
}
return err
}
func (ctrl *VMRestoreController) doUpdateErrorWithFailure(restore *snapshotv1.VirtualMachineRestore, errMsg string, fail bool) error {
updated := restore.DeepCopy()
eventReason := restoreErrorEvent
eventMsg := fmt.Sprintf("VirtualMachineRestore encountered error %s", errMsg)
updateRestoreCondition(updated, newProgressingCondition(corev1.ConditionFalse, errMsg))
updateRestoreCondition(updated, newReadyCondition(corev1.ConditionFalse, errMsg))
if fail {
eventReason = restoreFailedEvent
eventMsg = fmt.Sprintf("VirtualMachineRestore failed %s", errMsg)
updateRestoreCondition(updated, newFailureCondition(corev1.ConditionTrue, errMsg))
}
ctrl.Recorder.Eventf(
restore,
corev1.EventTypeWarning,
eventReason,
eventMsg,
)
return ctrl.doUpdateStatus(restore, updated)
}
func (ctrl *VMRestoreController) doUpdateStatus(original, updated *snapshotv1.VirtualMachineRestore) error {
if !equality.Semantic.DeepEqual(original.Status, updated.Status) {
if _, err := ctrl.Client.VirtualMachineRestore(updated.Namespace).UpdateStatus(context.Background(), updated, metav1.UpdateOptions{}); err != nil {
return err
}
}
return nil
}
func (ctrl *VMRestoreController) handleVMRestoreDeletion(vmRestore *snapshotv1.VirtualMachineRestore, target restoreTarget) error {
logger := log.Log.Object(vmRestore)
logger.V(3).Infof("Handling deleted VirtualMachineRestore")
if !controller.HasFinalizer(vmRestore, vmRestoreFinalizer) {
return nil
}
vmRestoreCpy := vmRestore.DeepCopy()
if target.Exists() {
err := target.UpdateDoneRestore()
if err != nil {
logger.Reason(err).Error("Error updating done restore")
return ctrl.doUpdateError(vmRestoreCpy, err)
}
}
updateRestoreCondition(vmRestoreCpy, newProgressingCondition(corev1.ConditionFalse, "VM restore is deleting"))
updateRestoreCondition(vmRestoreCpy, newReadyCondition(corev1.ConditionFalse, "VM restore is deleting"))
if !equality.Semantic.DeepEqual(vmRestore.Status, vmRestoreCpy.Status) {
return ctrl.doUpdateStatus(vmRestore, vmRestoreCpy)
}
controller.RemoveFinalizer(vmRestoreCpy, vmRestoreFinalizer)
patch, err := generateFinalizerPatch(vmRestore.Finalizers, vmRestoreCpy.Finalizers)
if err != nil {
return err
}
_, err = ctrl.Client.VirtualMachineRestore(vmRestore.Namespace).Patch(context.Background(), vmRestore.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
return err
}
func (ctrl *VMRestoreController) handleVMRestoreTargetNotReady(vmRestore *snapshotv1.VirtualMachineRestore, target restoreTarget) error {
vmRestoreCpy := vmRestore.DeepCopy()
// The default targetReadinessPolicy grants a grace period for the user to make
// the target ready
targetReadinessPolicy := snapshotv1.VirtualMachineRestoreWaitGracePeriodAndFail
if vmRestore.Spec.TargetReadinessPolicy != nil {
targetReadinessPolicy = *vmRestore.Spec.TargetReadinessPolicy
}
var reason, eventMsg string
switch targetReadinessPolicy {
case snapshotv1.VirtualMachineRestoreWaitEventually:
reason = waitEventuallyMessage
eventMsg = vmiExistsEventMessage
case snapshotv1.VirtualMachineRestoreStopTarget:
return ctrl.stopTarget(vmRestore, target)
case snapshotv1.VirtualMachineRestoreWaitGracePeriodAndFail:
if vmRestoreTargetReadyGracePeriodExceeded(vmRestore) {
return ctrl.doUpdateErrorWithFailure(vmRestore, restoreGracePeriodExceededError, true)
}
reason = waitGracePeriodMessage
eventMsg = vmiExistsEventMessage
case snapshotv1.VirtualMachineRestoreFailImmediate:
return ctrl.doUpdateErrorWithFailure(vmRestore, targetNotReadyFailureMessage, true)
default:
return fmt.Errorf("unknown targetReadinessPolicy: %v", targetReadinessPolicy)
}
ctrl.Recorder.Event(vmRestoreCpy, corev1.EventTypeWarning, restoreVMNotReadyEvent, eventMsg)
updateRestoreCondition(vmRestoreCpy, newProgressingCondition(corev1.ConditionFalse, reason))
updateRestoreCondition(vmRestoreCpy, newReadyCondition(corev1.ConditionFalse, reason))
return ctrl.doUpdateStatus(vmRestore, vmRestoreCpy)
}
func (ctrl *VMRestoreController) stopTarget(vmRestore *snapshotv1.VirtualMachineRestore, target restoreTarget) error {
vmRestoreCpy := vmRestore.DeepCopy()
ctrl.Recorder.Event(vmRestoreCpy, corev1.EventTypeWarning, restoreVMNotReadyEvent, stopTargetMessage)
updateRestoreCondition(vmRestoreCpy, newProgressingCondition(corev1.ConditionFalse, stopTargetMessage))
updateRestoreCondition(vmRestoreCpy, newReadyCondition(corev1.ConditionFalse, stopTargetMessage))
// Stop the restore target
err := target.Stop()
if err != nil {
return ctrl.doUpdateError(vmRestoreCpy, err)
}
return ctrl.doUpdateStatus(vmRestore, vmRestoreCpy)
}
func vmRestoreTargetReadyGracePeriodExceeded(vmRestore *snapshotv1.VirtualMachineRestore) bool {
deadline := vmRestore.CreationTimestamp.Add(snapshotv1.DefaultGracePeriod)
return time.Until(deadline) < 0
}
func (ctrl *VMRestoreController) reconcileVolumeRestores(vmRestore *snapshotv1.VirtualMachineRestore, target restoreTarget, vmSnapshot *snapshotv1.VirtualMachineSnapshot) (bool, error) {
content, err := ctrl.getSnapshotContent(vmSnapshot)
if err != nil {
return false, err
}
noRestore, err := ctrl.volumesNotForRestore(content)
if err != nil {
return false, err
}
var restores []snapshotv1.VolumeRestore
for _, vb := range content.Spec.VolumeBackups {
if noRestore.Has(vb.VolumeName) {
continue
}
found := false
for _, vr := range vmRestore.Status.Restores {
if vb.VolumeName == vr.VolumeName {
restores = append(restores, vr)
found = true
break
}
}
if !found {
if vb.VolumeSnapshotName == nil {
return false, fmt.Errorf("VolumeSnapshotName missing %+v", vb)
}
pvcName := restorePVCName(vmRestore, vb.VolumeName, vb.PersistentVolumeClaim.Name)
vr := snapshotv1.VolumeRestore{
VolumeName: vb.VolumeName,
PersistentVolumeClaimName: pvcName,
VolumeSnapshotName: *vb.VolumeSnapshotName,
}
restores = append(restores, vr)
}
}
if !equality.Semantic.DeepEqual(vmRestore.Status.Restores, restores) {
if len(vmRestore.Status.Restores) > 0 {
log.Log.Object(vmRestore).Warning("VMRestore status restores do not match the snapshot content; recomputing them")
}
vmRestore.Status.Restores = restores
return true, nil
}
createdPVC := false
deletedPVC := false
waitingPVC := false
waitingDVNameUpdate := false
for i, restore := range restores {
pvc, err := ctrl.getPVC(vmRestore.Namespace, restore.PersistentVolumeClaimName)
if err != nil {
return false, err
}
if pvc == nil {
backup, err := getRestoreVolumeBackup(restore.VolumeName, content)
if err != nil {
return false, err
}
var dvOwner string
if restore.DataVolumeName != nil {
dvOwner = *restore.DataVolumeName
}
if err = ctrl.createRestorePVC(vmRestore, target, backup, &restore, content.Spec.Source.VirtualMachine, dvOwner); err != nil {
return false, err
}
createdPVC = true
} else if isVolumeRestorePolicyInPlace(vmRestore) && !hasLastRestoreAnnotation(vmRestore, pvc) {
// If this volume is backed by a DataVolume, we're about to delete the PVC of that DV. The PVC will be
// re-created shortly after from a VolumeSnapshot, and the DV should rebind to it. That still leaves the DV
// with no PVC for a short window, so to prevent race conditions and reconciles of the DV during the PVC
// restore, we mark the DV as prePopulated to prevent any accidental creation of a PVC by the DataVolume.
var ownerDV string
ownerReference := metav1.GetControllerOf(pvc)
if ownerReference != nil && ownerReference.Kind == "DataVolume" {
ownerDV = ownerReference.Name
}
if ownerDV != "" {
log.Log.Object(vmRestore).Infof("marking datavolume %s/%s as prepopulated before deleting its PVC", vmRestore.Namespace, ownerDV)
// We update the status of the volume to note that it belongs to a DataVolume.
// We'll need this information later to restore the PVC with annotations to rebind it
// to the DV.
if vmRestore.Status.Restores[i].DataVolumeName == nil {
vmRestore.Status.Restores[i].DataVolumeName = &ownerDV
waitingDVNameUpdate = true
continue
}
if err := ctrl.prepopulateDataVolume(vmRestore.Namespace, ownerDV, vmRestore.Name); err != nil {
return false, err
}
}
// If we're here, the PVC associated with that volume exists, and needs to be wiped before we restore in its place
log.Log.Object(vmRestore).Infof("deleting %s/%s to replace volume due to policy InPlace", vmRestore.Namespace, pvc.Name)
if err = ctrl.Client.CoreV1().PersistentVolumeClaims(vmRestore.Namespace).
Delete(context.Background(), pvc.Name, metav1.DeleteOptions{}); err != nil {
return false, err
}
deletedPVC = true
} else if pvc.Status.Phase == corev1.ClaimPending {
bindingMode, err := ctrl.getBindingMode(pvc)
if err != nil {
return false, err
}
if bindingMode == nil || *bindingMode == storagev1.VolumeBindingImmediate {
waitingPVC = true
}
} else if pvc.Status.Phase != corev1.ClaimBound {
return false, fmt.Errorf("PVC %s/%s in status %q", pvc.Namespace, pvc.Name, pvc.Status.Phase)
}
}
return createdPVC || deletedPVC || waitingPVC || waitingDVNameUpdate, nil
}
func (ctrl *VMRestoreController) getBindingMode(pvc *corev1.PersistentVolumeClaim) (*storagev1.VolumeBindingMode, error) {
if pvc.Spec.StorageClassName == nil {
return nil, nil
}
obj, exists, err := ctrl.StorageClassInformer.GetStore().GetByKey(*pvc.Spec.StorageClassName)
if err != nil {
return nil, err
}
if !exists {
return nil, fmt.Errorf("StorageClass %s does not exist", *pvc.Spec.StorageClassName)
}
sc := obj.(*storagev1.StorageClass).DeepCopy()
return sc.VolumeBindingMode, nil
}
func (t *vmRestoreTarget) UpdateDoneRestore() error {
if !t.Exists() {
return fmt.Errorf("At this point target should exist")
}
if t.vm.Status.RestoreInProgress == nil || *t.vm.Status.RestoreInProgress != t.vmRestore.Name {
return nil
}
vmCopy := t.vm.DeepCopy()
vmCopy.Status.RestoreInProgress = nil
vmCopy.Status.MemoryDumpRequest = nil
vmCopy, err := t.controller.Client.VirtualMachine(vmCopy.Namespace).UpdateStatus(context.Background(), vmCopy, metav1.UpdateOptions{})
if err != nil {
return err
}
t.vm = vmCopy
return nil
}
func (t *vmRestoreTarget) UpdateRestoreInProgress() error {
if !t.Exists() || hasLastRestoreAnnotation(t.vmRestore, t.vm) {
return nil
}
if t.vm.Status.RestoreInProgress != nil && *t.vm.Status.RestoreInProgress != t.vmRestore.Name {
return fmt.Errorf("vm restore %s in progress", *t.vm.Status.RestoreInProgress)
}
vmCopy := t.vm.DeepCopy()
if vmCopy.Status.RestoreInProgress == nil {
vmCopy.Status.RestoreInProgress = &t.vmRestore.Name
var err error
vmCopy, err = t.controller.Client.VirtualMachine(vmCopy.Namespace).UpdateStatus(context.Background(), vmCopy, metav1.UpdateOptions{})
if err != nil {
return err
}
}
t.vm = vmCopy
return nil
}
func (t *vmRestoreTarget) Stop() error {
if !t.Exists() {
return nil
}
log.Log.Infof("Stopping VM before restore [%s/%s]", t.vm.Namespace, t.vm.Name)
return t.controller.Client.VirtualMachine(t.vm.Namespace).Stop(context.Background(), t.vm.Name, &kubevirtv1.StopOptions{})
}
func (t *vmRestoreTarget) Ready() (bool, error) {
if !t.Exists() {
return true, nil
}
log.Log.Object(t.vmRestore).V(3).Info("Checking VM ready")
vmiKey, err := controller.KeyFunc(t.vm)
if err != nil {
return false, err
}
_, exists, err := t.controller.VMIInformer.GetStore().GetByKey(vmiKey)
return !exists, err
}
func (t *vmRestoreTarget) Reconcile() (bool, error) {
if t.Exists() && hasLastRestoreAnnotation(t.vmRestore, t.vm) {
return false, nil
}
snapshotVM, err := t.getSnapshotVM()
if err != nil {
return false, err
}
if updated, err := t.updateVMRestoreRestores(snapshotVM); updated || err != nil {
return updated, err
}
restoredVM, err := t.generateRestoredVMSpec(snapshotVM)
if err != nil {
return false, err
}
if updated, err := t.reconcileDataVolumes(restoredVM); updated || err != nil {
return updated, err
}
// Reconcile backend storage PVC since it's not part of the VM/VMI spec
if ready, err := t.reconcileBackendVolume(snapshotVM); !ready || err != nil {
return !ready, err
}
return t.reconcileSpec(restoredVM)
}
func (t *vmRestoreTarget) reconcileBackendVolume(snapshotVM *snapshotv1.VirtualMachine) (bool, error) {
if !backendstorage.IsBackendStorageNeeded(snapshotVM) {
return true, nil
}
// Retrieve only the backend volume
volumes, err := storageutils.GetVolumes(snapshotVM, t.controller.Client, storageutils.WithBackendVolume)
if err != nil {
// Not checking for ErrNoBackendPVC; simply returning the
// error, as the backend PVC should exist by now
return false, err
}
isRestorePVCUpdated := false
for _, volume := range volumes {
pvc, err := t.controller.getPVC(snapshotVM.Namespace, volume.VolumeSource.PersistentVolumeClaim.ClaimName)
if err != nil || pvc == nil {
return false, err
}
// Step 1: Remove backend label from the original backend PVC
updated, err := t.removeBackendLabelFromPVC(pvc, snapshotVM.Name)
if err != nil {
return false, err
}
// Step 2: Update the restore PVC with backend labels
isRestorePVCUpdated, err = t.updateRestorePVCWithBackendLabel(pvc)
if err != nil {
return false, err
}
isRestorePVCUpdated = updated || isRestorePVCUpdated
}
return isRestorePVCUpdated, nil
}
func (t *vmRestoreTarget) removeBackendLabelFromPVC(pvc *corev1.PersistentVolumeClaim, snapshotVMName string) (bool, error) {
if pvc.Labels == nil {
return false, nil
}
// Only remove label when the VM name is the same since the backend logic filters by VM name + label
if t.vmRestore.Spec.Target.Name == snapshotVMName {
for _, vr := range t.vmRestore.Status.Restores {
if vr.PersistentVolumeClaimName == pvc.Name {
log.Log.Object(t.vmRestore).V(3).Infof("Restore PVC %s updated with backend label", pvc.Name)
return true, nil
}
}
// Remove the backend label.
newLabels := getFilteredLabels(pvc.Labels)
// Adding this label to identify the original backend PVC and garbage-collect it.
newLabels[restoreCleanupBackendPVCLabel] = getCleanupLabelValue(t.vmRestore)
// Generate patch to remove the backend label
patchBytes, err := patch.New(
patch.WithTest("/metadata/labels", pvc.Labels),
patch.WithReplace("/metadata/labels", newLabels),
).GeneratePayload()
if err != nil {
return false, err
}
_, err = t.controller.Client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Patch(context.Background(), pvc.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
return false, err
}
return false, nil
}
func (t *vmRestoreTarget) updateRestorePVCWithBackendLabel(originalPVC *corev1.PersistentVolumeClaim) (bool, error) {
for _, vr := range t.vmRestore.Status.Restores {
if vr.VolumeName == storageutils.BackendPVCVolumeName(t.vmRestore.Spec.Target.Name) {
restorePVC, err := t.controller.getPVC(t.vmRestore.Namespace, vr.PersistentVolumeClaimName)
if err != nil {
return false, err
}
// This means the restore PVC is already updated
if restorePVC.Name == originalPVC.Name {
return true, nil
}
// Patch restore PVC with backend label
patchSet := patch.New()
if restorePVC.Labels == nil {
patchSet.AddOption(patch.WithAdd("/metadata/labels", map[string]string{
backendstorage.PVCPrefix: t.vmRestore.Spec.Target.Name,
}))
} else {
updatedLabels := make(map[string]string, len(restorePVC.Labels))
for k, v := range restorePVC.Labels {
updatedLabels[k] = v
}
updatedLabels[backendstorage.PVCPrefix] = t.vmRestore.Spec.Target.Name
patchSet.AddOption(
patch.WithTest("/metadata/labels", restorePVC.Labels),
patch.WithReplace("/metadata/labels", updatedLabels),
)
}
patchBytes, err := patchSet.GeneratePayload()
if err != nil {
return false, err
}
_, err = t.controller.Client.CoreV1().PersistentVolumeClaims(restorePVC.Namespace).Patch(context.Background(), restorePVC.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return false, err
}
}
}
return false, nil
}
func getCleanupLabelValue(vmRestore *snapshotv1.VirtualMachineRestore) string {
return naming.GetName(backendstorage.PVCPrefix, vmRestore.Spec.Target.Name, validation.DNS1035LabelMaxLength)
}
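// Example (illustrative sketch): naming.GetName joins the prefix and target name,
// truncating and hashing when the result would exceed the DNS label limit. Assuming
// backendstorage.PVCPrefix is "persistent-state-for" (an assumption; the constant
// is defined elsewhere), a restore targeting VM "my-vm" yields:
//
//	getCleanupLabelValue(r) // "persistent-state-for-my-vm"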
func (t *vmRestoreTarget) getSnapshotVM() (*snapshotv1.VirtualMachine, error) {
vmSnapshot, err := t.controller.getVMSnapshot(t.vmRestore)
if err != nil {
return nil, err
}
content, err := t.controller.getSnapshotContent(vmSnapshot)
if err != nil {
return nil, err
}
snapshotVM := content.Spec.Source.VirtualMachine
if snapshotVM == nil {
return nil, fmt.Errorf("unexpected snapshot source")
}
return snapshotVM, nil
}
func (t *vmRestoreTarget) updateVMRestoreRestores(snapshotVM *snapshotv1.VirtualMachine) (bool, error) {
var restores = make([]snapshotv1.VolumeRestore, len(t.vmRestore.Status.Restores))
for i, vr := range t.vmRestore.Status.Restores {
vr.DeepCopyInto(&restores[i])
}
for k := range restores {
restore := &restores[k]
// Just need to access the regular VM volumes here as the backend volume
// is handled separately.
volumes, err := storageutils.GetVolumes(snapshotVM, t.controller.Client)
if err != nil {
return false, err
}
for _, volume := range volumes {
if volume.Name != restore.VolumeName {
continue
}
if volume.DataVolume != nil {
templateIndex := findDVTemplateIndex(volume.DataVolume.Name, snapshotVM)
if templateIndex >= 0 {
dvName := restoreDVName(t.vmRestore, restore.VolumeName, volume.DataVolume.Name)
pvc, err := t.controller.getPVC(t.vmRestore.Namespace, restore.PersistentVolumeClaimName)
if err != nil {
return false, err
}
if pvc == nil {
return false, fmt.Errorf("pvc %s/%s does not exist and should", t.vmRestore.Namespace, restore.PersistentVolumeClaimName)
}
if err = t.updatePVCPopulatedForAnnotation(pvc, dvName); err != nil {
return false, err
}
restore.DataVolumeName = &dvName
break
}
}
}
}
if !equality.Semantic.DeepEqual(t.vmRestore.Status.Restores, restores) {
t.vmRestore.Status.Restores = restores
return true, nil
}
return false, nil
}
func (t *vmRestoreTarget) UpdateTarget(obj metav1.Object) {
t.vm = obj.(*kubevirtv1.VirtualMachine)
}
func (t *vmRestoreTarget) generateRestoredVMSpec(snapshotVM *snapshotv1.VirtualMachine) (*kubevirtv1.VirtualMachine, error) {
log.Log.Object(t.vmRestore).V(3).Info("generating restored VM spec")
var newTemplates = make([]kubevirtv1.DataVolumeTemplateSpec, len(snapshotVM.Spec.DataVolumeTemplates))
var newVolumes []kubevirtv1.Volume
for i, dvt := range snapshotVM.Spec.DataVolumeTemplates {
dvt.DeepCopyInto(&newTemplates[i])
}
// Just need to access the regular VM volumes here as the backend volume
// doesn't need to be included in the VM spec.
volumes, err := storageutils.GetVolumes(snapshotVM, t.controller.Client)
if err != nil {
return nil, err
}
for _, v := range volumes {
nv := v.DeepCopy()
if nv.DataVolume != nil || nv.PersistentVolumeClaim != nil {
for _, vr := range t.vmRestore.Status.Restores {
if vr.VolumeName != nv.Name {
continue
}
if nv.DataVolume == nil {
nv.PersistentVolumeClaim.ClaimName = vr.PersistentVolumeClaimName
continue
}
templateIndex := findDVTemplateIndex(v.DataVolume.Name, snapshotVM)
if templateIndex >= 0 {
if vr.DataVolumeName == nil {
return nil, fmt.Errorf("DataVolumeName for dv %s should have been updated already", v.DataVolume.Name)
}
dv := snapshotVM.Spec.DataVolumeTemplates[templateIndex].DeepCopy()
dv.Name = *vr.DataVolumeName
newTemplates[templateIndex] = *dv
nv.DataVolume.Name = *vr.DataVolumeName
} else {
// convert to PersistentVolumeClaim volume
nv = &kubevirtv1.Volume{
Name: nv.Name,
VolumeSource: kubevirtv1.VolumeSource{
PersistentVolumeClaim: &kubevirtv1.PersistentVolumeClaimVolumeSource{
PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{
ClaimName: vr.PersistentVolumeClaimName,
},
},
},
}
}
}
} else if nv.MemoryDump != nil {
// don't restore memory dump volume in the new spec
continue
}
newVolumes = append(newVolumes, *nv)
}
var newVM *kubevirtv1.VirtualMachine
if !t.Exists() {
newVM = &kubevirtv1.VirtualMachine{
ObjectMeta: metav1.ObjectMeta{
Name: t.vmRestore.Spec.Target.Name,
Namespace: t.vmRestore.Namespace,
Labels: snapshotVM.Labels,
Annotations: snapshotVM.Annotations,
},
Spec: *snapshotVM.Spec.DeepCopy(),
Status: kubevirtv1.VirtualMachineStatus{},
}
if newVM.Spec.Running != nil {
newVM.Spec.Running = pointer.P(false)
} else {
newVM.Spec.RunStrategy = pointer.P(kubevirtv1.RunStrategyHalted)
}
} else {
newVM = t.vm.DeepCopy()
newVM.Spec = *snapshotVM.Spec.DeepCopy()
if t.vm.Spec.Running != nil {
newVM.Spec.Running = pointer.P(false)
newVM.Spec.RunStrategy = nil
} else {
runStrategy, err := t.vm.RunStrategy()
if err != nil {
return nil, err
}
// make sure an existing VM keeps the same run strategy as before the restore
newVM.Spec.RunStrategy = pointer.P(runStrategy)
newVM.Spec.Running = nil
}
}
newVM.Spec.DataVolumeTemplates = newTemplates
newVM.Spec.Template.Spec.Volumes = newVolumes
setLastRestoreAnnotation(t.vmRestore, newVM)
if snapshotVM.Name == newVM.Name {
setLegacyFirmwareUUID(newVM)
}
return newVM, nil
}
func (t *vmRestoreTarget) reconcileSpec(restoredVM *kubevirtv1.VirtualMachine) (bool, error) {
log.Log.Object(t.vmRestore).V(3).Info("Reconcile new VM spec")
var err error
if err = t.restoreInstancetypeControllerRevisions(restoredVM); err != nil {
return false, err
}
if !t.Exists() {
restoredVM, err = patchVM(restoredVM, t.vmRestore.Spec.Patches)
if err != nil {
return false, fmt.Errorf("error patching VM %s: %v", restoredVM.Name, err)
}
restoredVM, err = t.controller.Client.VirtualMachine(t.vmRestore.Namespace).Create(context.Background(), restoredVM, metav1.CreateOptions{})
} else {
restoredVM, err = t.controller.Client.VirtualMachine(restoredVM.Namespace).Update(context.Background(), restoredVM, metav1.UpdateOptions{})
}
if err != nil {
return false, err
}
t.UpdateTarget(restoredVM)
if err = t.claimInstancetypeControllerRevisionsOwnership(t.vm); err != nil {
return false, err
}
if err = t.updateRestorePVCOwnership(); err != nil {
return false, err
}
return true, nil
}
func (t *vmRestoreTarget) updateRestorePVCOwnership() error {
if isVolumeOwnershipPolicyNone(t.vmRestore) || !t.Exists() {
return nil
}
for _, volume := range t.VirtualMachine().Spec.Template.Spec.Volumes {
if volume.PersistentVolumeClaim != nil {
pvc, err := t.controller.Client.CoreV1().PersistentVolumeClaims(t.vmRestore.Namespace).Get(context.Background(), volume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{})
if err != nil {
return err
}
// Check if the PVC is already owned by something else
if len(pvc.OwnerReferences) == 0 {
// Only set the owner reference if the PVC was originally owned by the source VM
if pvc.Annotations[restoreOwnedByVMLabel] == t.vmRestore.Name {
t.Own(pvc)
delete(pvc.Annotations, restoreOwnedByVMLabel)
// Update the PVC to have the owner reference
_, err = t.controller.Client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(context.Background(), pvc, metav1.UpdateOptions{})
if err != nil {
return err
}
}
}
}
}
return nil
}
func findDVTemplateIndex(dvName string, vm *snapshotv1.VirtualMachine) int {
templateIndex := -1
for i, dvt := range vm.Spec.DataVolumeTemplates {
if dvName == dvt.Name {
templateIndex = i
break
}
}
return templateIndex
}
func (t *vmRestoreTarget) updatePVCPopulatedForAnnotation(pvc *corev1.PersistentVolumeClaim, dvName string) error {
updatePVC := pvc.DeepCopy()
if updatePVC.Annotations[populatedForPVCAnnotation] != dvName {
if updatePVC.Annotations == nil {
updatePVC.Annotations = make(map[string]string)
}
updatePVC.Annotations[populatedForPVCAnnotation] = dvName
// datavolume will take ownership
updatePVC.OwnerReferences = nil
_, err := t.controller.Client.CoreV1().PersistentVolumeClaims(updatePVC.Namespace).Update(context.Background(), updatePVC, metav1.UpdateOptions{})
if err != nil {
return err
}
}
return nil
}
// findDatavolumesForDeletion finds DataVolumes that will no longer
// exist after the vmrestore is completed
func findDatavolumesForDeletion(oldDVTemplates, newDVTemplates []kubevirtv1.DataVolumeTemplateSpec) []string {
var deletedDataVolumes []string
for _, cdv := range oldDVTemplates {
found := false
for _, ndv := range newDVTemplates {
if cdv.Name == ndv.Name {
found = true
break
}
}
if !found {
deletedDataVolumes = append(deletedDataVolumes, cdv.Name)
}
}
return deletedDataVolumes
}
func (t *vmRestoreTarget) reconcileDataVolumes(restoredVM *kubevirtv1.VirtualMachine) (bool, error) {
createdDV := false
waitingDV := false
for _, dvt := range restoredVM.Spec.DataVolumeTemplates {
dv, err := t.controller.getDV(restoredVM.Namespace, dvt.Name)
if err != nil {
return false, err
}
if dv != nil {
waitingDV = waitingDV ||
(dv.Status.Phase != cdiv1.Succeeded &&
dv.Status.Phase != cdiv1.WaitForFirstConsumer &&
dv.Status.Phase != cdiv1.PendingPopulation)
continue
}
created, err := t.createDataVolume(restoredVM, dvt)
if err != nil {
return false, err
}
createdDV = createdDV || created
}
if t.Exists() {
deletedDataVolumes := findDatavolumesForDeletion(t.vm.Spec.DataVolumeTemplates, restoredVM.Spec.DataVolumeTemplates)
if !equality.Semantic.DeepEqual(t.vmRestore.Status.DeletedDataVolumes, deletedDataVolumes) {
t.vmRestore.Status.DeletedDataVolumes = deletedDataVolumes
return true, nil
}
}
return createdDV || waitingDV, nil
}
func (t *vmRestoreTarget) getControllerRevision(namespace, name string) (*appsv1.ControllerRevision, error) {
revisionKey := cacheKeyFunc(namespace, name)
obj, exists, err := t.controller.CRInformer.GetStore().GetByKey(revisionKey)
if err != nil {
return nil, err
}
if !exists {
return nil, fmt.Errorf("Unable to find ControllerRevision %s", revisionKey)
}
return obj.(*appsv1.ControllerRevision), nil
}
func (t *vmRestoreTarget) restoreInstancetypeControllerRevision(vmSnapshotRevisionName, vmSnapshotName string, vm *kubevirtv1.VirtualMachine) (*appsv1.ControllerRevision, error) {
snapshotCR, err := t.getControllerRevision(vm.Namespace, vmSnapshotRevisionName)
if err != nil {
return nil, err
}
// Swap the snapshot name for the VM name to build the restored CR name
restoredCRName := strings.Replace(vmSnapshotRevisionName, vmSnapshotName, vm.Name, 1)
restoredCR := snapshotCR.DeepCopy()
restoredCR.ObjectMeta.Reset()
restoredCR.ObjectMeta.SetLabels(snapshotCR.Labels)
restoredCR.Name = restoredCRName
// If the target VirtualMachine already exists it's likely that the original ControllerRevision is already present.
// Check that here by attempting to lookup the CR using the generated restoredCRName.
// Ignore any NotFound errors raised allowing the CR to be restored below.
if t.Exists() {
existingCR, err := t.getControllerRevision(vm.Namespace, restoredCRName)
if err != nil && !k8serrors.IsNotFound(err) {
return nil, err
}
if existingCR != nil {
// Ensure that the existing CR contains the expected data from the snapshot before returning it
equal, err := revision.Compare(snapshotCR, existingCR)
if err != nil {
return nil, err
}
if equal {
return existingCR, nil
}
// Otherwise, as CRs are immutable, delete the existing CR so we can restore the version from the snapshot below
if err := t.controller.Client.AppsV1().ControllerRevisions(vm.Namespace).Delete(context.Background(), existingCR.Name, metav1.DeleteOptions{}); err != nil {
return nil, err
}
// As the VirtualMachine already exists here we can also populate the OwnerReference, avoiding the need to do so later during claimInstancetypeControllerRevisionOwnership
restoredCR.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(vm, kubevirtv1.VirtualMachineGroupVersionKind)}
}
}
restoredCR, err = t.controller.Client.AppsV1().ControllerRevisions(vm.Namespace).Create(context.Background(), restoredCR, metav1.CreateOptions{})
// This might not be our first time through the reconcile loop so accommodate previous calls to restoreInstancetypeControllerRevision by ignoring unexpected existing CRs for now.
// TODO - Check the contents of the existing CR here against that of the snapshot CR
if err != nil && !k8serrors.IsAlreadyExists(err) {
return nil, err
}
return restoredCR, nil
}
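// Example (illustrative, hypothetical names): for a snapshot CR named
// "my-snap-instancetype-v1" created by snapshot "my-snap", restoring into a VM
// "my-vm" renames the CR by swapping the snapshot name for the VM name:
//
//	strings.Replace("my-snap-instancetype-v1", "my-snap", "my-vm", 1) // "my-vm-instancetype-v1"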
func (t *vmRestoreTarget) restoreInstancetypeControllerRevisions(vm *kubevirtv1.VirtualMachine) error {
if vm.Spec.Instancetype != nil && vm.Spec.Instancetype.RevisionName != "" {
restoredCR, err := t.restoreInstancetypeControllerRevision(vm.Spec.Instancetype.RevisionName, t.vmRestore.Spec.VirtualMachineSnapshotName, vm)
if err != nil {
return err
}
vm.Spec.Instancetype.RevisionName = restoredCR.Name
}
if vm.Spec.Preference != nil && vm.Spec.Preference.RevisionName != "" {
restoredCR, err := t.restoreInstancetypeControllerRevision(vm.Spec.Preference.RevisionName, t.vmRestore.Spec.VirtualMachineSnapshotName, vm)
if err != nil {
return err
}
vm.Spec.Preference.RevisionName = restoredCR.Name
}
return nil
}
func (t *vmRestoreTarget) claimInstancetypeControllerRevisionOwnership(revisionName string, vm *kubevirtv1.VirtualMachine) error {
cr, err := t.getControllerRevision(vm.Namespace, revisionName)
if err != nil {
return err
}
if !metav1.IsControlledBy(cr, vm) {
cr.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(vm, kubevirtv1.VirtualMachineGroupVersionKind)}
_, err = t.controller.Client.AppsV1().ControllerRevisions(vm.Namespace).Update(context.Background(), cr, metav1.UpdateOptions{})
if err != nil {
return err
}
}
return nil
}
func (t *vmRestoreTarget) claimInstancetypeControllerRevisionsOwnership(vm *kubevirtv1.VirtualMachine) error {
if vm.Spec.Instancetype != nil && vm.Spec.Instancetype.RevisionName != "" {
if err := t.claimInstancetypeControllerRevisionOwnership(vm.Spec.Instancetype.RevisionName, vm); err != nil {
return err
}
}
if vm.Spec.Preference != nil && vm.Spec.Preference.RevisionName != "" {
if err := t.claimInstancetypeControllerRevisionOwnership(vm.Spec.Preference.RevisionName, vm); err != nil {
return err
}
}
return nil
}
func (t *vmRestoreTarget) createDataVolume(restoredVM *kubevirtv1.VirtualMachine, dvt kubevirtv1.DataVolumeTemplateSpec) (bool, error) {
pvc, err := t.controller.getPVC(restoredVM.Namespace, dvt.Name)
if err != nil {
return false, err
}
if pvc == nil {
return false, fmt.Errorf("when creating restore dv pvc %s/%s does not exist and should",
t.vmRestore.Namespace, dvt.Name)
}
if pvc.Annotations[populatedForPVCAnnotation] != dvt.Name || len(pvc.OwnerReferences) > 0 {
return false, nil
}
newDataVolume, err := typesutil.GenerateDataVolumeFromTemplate(t.controller.Client, dvt, restoredVM.Namespace, restoredVM.Spec.Template.Spec.PriorityClassName)
if err != nil {
return false, fmt.Errorf("Unable to create restore DataVolume manifest: %v", err)
}
if newDataVolume.Annotations == nil {
newDataVolume.Annotations = make(map[string]string)
}
newDataVolume.Annotations[RestoreNameAnnotation] = t.vmRestore.Name
newDataVolume.Annotations[cdiv1.AnnPrePopulated] = "true"
if _, err = t.controller.Client.CdiClient().CdiV1beta1().DataVolumes(restoredVM.Namespace).Create(context.Background(), newDataVolume, metav1.CreateOptions{}); err != nil {
t.controller.Recorder.Eventf(t.vm, corev1.EventTypeWarning, restoreDataVolumeCreateErrorEvent, "Error creating restore DataVolume %s: %v", newDataVolume.Name, err)
return false, fmt.Errorf("Failed to create restore DataVolume: %v", err)
}
return true, nil
}
func (t *vmRestoreTarget) Own(obj metav1.Object) {
if !t.Exists() {
return
}
obj.SetOwnerReferences([]metav1.OwnerReference{
{
APIVersion: kubevirtv1.GroupVersion.String(),
Kind: "VirtualMachine",
Name: t.vm.Name,
UID: t.vm.UID,
Controller: pointer.P(true),
BlockOwnerDeletion: pointer.P(true),
},
})
}
func (ctrl *VMRestoreController) deleteObsoleteVolumes(vmRestore *snapshotv1.VirtualMachineRestore, target restoreTarget) error {
for _, dvName := range vmRestore.Status.DeletedDataVolumes {
objKey := cacheKeyFunc(vmRestore.Namespace, dvName)
_, exists, err := ctrl.DataVolumeInformer.GetStore().GetByKey(objKey)
if err != nil {
return err
}
if exists {
err = ctrl.Client.CdiClient().CdiV1beta1().DataVolumes(vmRestore.Namespace).
Delete(context.Background(), dvName, metav1.DeleteOptions{})
if err != nil {
return err
}
}
}
// Garbage-collect original backend PVC if necessary
return ctrl.deleteObsoleteBackendPVC(vmRestore, target)
}
func (ctrl *VMRestoreController) deleteObsoleteBackendPVC(vmRestore *snapshotv1.VirtualMachineRestore, target restoreTarget) error {
// The target should always exist at this point; the Exists() check is just for safety.
if target.Exists() && backendstorage.IsBackendStorageNeeded(target.VirtualMachine()) {
pvcs, err := ctrl.Client.CoreV1().PersistentVolumeClaims(vmRestore.Namespace).List(context.Background(), metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", restoreCleanupBackendPVCLabel, getCleanupLabelValue(vmRestore)),
})
if err != nil {
return err
}
for _, pvc := range pvcs.Items {
err = ctrl.Client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.Background(), pvc.Name, metav1.DeleteOptions{})
if err != nil {
return err
}
}
}
return nil
}
func (t *vmRestoreTarget) TargetRestored() bool {
return t.Exists() && hasLastRestoreAnnotation(t.vmRestore, t.vm)
}
func (t *vmRestoreTarget) UID() types.UID {
return t.vm.UID
}
func (t *vmRestoreTarget) Exists() bool {
return t.vm != nil
}
func (t *vmRestoreTarget) VirtualMachine() *kubevirtv1.VirtualMachine {
return t.vm
}
func sourceAndTargetAreDifferent(target restoreTarget, vmSnapshot *snapshotv1.VirtualMachineSnapshot) bool {
targetUID := target.UID()
return vmSnapshot.Status != nil && vmSnapshot.Status.SourceUID != nil && targetUID != *vmSnapshot.Status.SourceUID
}
func (ctrl *VMRestoreController) getVMSnapshot(vmRestore *snapshotv1.VirtualMachineRestore) (*snapshotv1.VirtualMachineSnapshot, error) {
objKey := cacheKeyFunc(vmRestore.Namespace, vmRestore.Spec.VirtualMachineSnapshotName)
obj, exists, err := ctrl.VMSnapshotInformer.GetStore().GetByKey(objKey)
if err != nil {
return nil, err
}
if !exists {
return nil, fmt.Errorf("VMSnapshot %s does not exist", objKey)
}
vmSnapshot := obj.(*snapshotv1.VirtualMachineSnapshot).DeepCopy()
if vmSnapshotFailed(vmSnapshot) {
return nil, fmt.Errorf("VMSnapshot %s failed and is invalid to use", objKey)
} else if !VmSnapshotReady(vmSnapshot) {
return nil, fmt.Errorf("VMSnapshot %s not ready", objKey)
}
if vmSnapshot.Status.VirtualMachineSnapshotContentName == nil {
return nil, fmt.Errorf("no snapshot content name in %s", objKey)
}
return vmSnapshot, nil
}
func (ctrl *VMRestoreController) getSnapshotContent(vmSnapshot *snapshotv1.VirtualMachineSnapshot) (*snapshotv1.VirtualMachineSnapshotContent, error) {
objKey := cacheKeyFunc(vmSnapshot.Namespace, *vmSnapshot.Status.VirtualMachineSnapshotContentName)
obj, exists, err := ctrl.VMSnapshotContentInformer.GetStore().GetByKey(objKey)
if err != nil {
return nil, err
}
if !exists {
return nil, fmt.Errorf("VMSnapshotContent %s does not exist", objKey)
}
vmSnapshotContent := obj.(*snapshotv1.VirtualMachineSnapshotContent).DeepCopy()
if !vmSnapshotContentReady(vmSnapshotContent) {
return nil, fmt.Errorf("VMSnapshotContent %s not ready", objKey)
}
return vmSnapshotContent, nil
}
func (ctrl *VMRestoreController) getVM(namespace, name string) (vm *kubevirtv1.VirtualMachine, err error) {
objKey := cacheKeyFunc(namespace, name)
obj, exists, err := ctrl.VMInformer.GetStore().GetByKey(objKey)
if err != nil || !exists {
return nil, err
}
return obj.(*kubevirtv1.VirtualMachine).DeepCopy(), nil
}
func patchVM(vm *kubevirtv1.VirtualMachine, patches []string) (*kubevirtv1.VirtualMachine, error) {
if len(patches) == 0 {
return vm, nil
}
log.Log.V(3).Object(vm).Infof("patching restore target. VM: %s. patches: %+v", vm.Name, patches)
marshalledVM, err := json.Marshal(vm)
if err != nil {
return vm, fmt.Errorf("cannot marshall VM %s: %v", vm.Name, err)
}
jsonPatch := "[\n" + strings.Join(patches, ",\n") + "\n]"
patch, err := jsonpatch.DecodePatch([]byte(jsonPatch))
if err != nil {
return vm, fmt.Errorf("cannot decode vm patches %s: %v", jsonPatch, err)
}
modifiedMarshalledVM, err := patch.Apply(marshalledVM)
if err != nil {
return vm, fmt.Errorf("failed to apply patch for VM %s: %v", jsonPatch, err)
}
vm = &kubevirtv1.VirtualMachine{}
err = json.Unmarshal(modifiedMarshalledVM, vm)
if err != nil {
return vm, fmt.Errorf("cannot unmarshal modified marshalled vm %s: %v", string(modifiedMarshalledVM), err)
}
log.Log.V(3).Object(vm).Infof("patching restore target completed. Modified VM: %s", string(modifiedMarshalledVM))
return vm, nil
}
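// Example (illustrative sketch): entries in vmRestore.Spec.Patches are raw
// JSON-patch operations; patchVM joins them into a single patch document and
// applies it to the marshalled VM:
//
//	patches := []string{
//		`{"op": "replace", "path": "/metadata/name", "value": "restored-vm"}`,
//		`{"op": "remove", "path": "/spec/template/spec/domain/firmware"}`,
//	}
//	patchedVM, err := patchVM(vm, patches)
//	// patchedVM carries the new name and no firmware stanza; on failure the
//	// original vm is returned alongside the error.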
func (ctrl *VMRestoreController) getDV(namespace, name string) (*cdiv1.DataVolume, error) {
objKey := cacheKeyFunc(namespace, name)
obj, exists, err := ctrl.DataVolumeInformer.GetStore().GetByKey(objKey)
if err != nil {
return nil, err
}
if !exists {
return nil, nil
}
return obj.(*cdiv1.DataVolume).DeepCopy(), nil
}
func (ctrl *VMRestoreController) getPVC(namespace, name string) (*corev1.PersistentVolumeClaim, error) {
objKey := cacheKeyFunc(namespace, name)
obj, exists, err := ctrl.PVCInformer.GetStore().GetByKey(objKey)
if err != nil {
return nil, err
}
if !exists {
return nil, nil
}
return obj.(*corev1.PersistentVolumeClaim).DeepCopy(), nil
}
func (ctrl *VMRestoreController) getTarget(vmRestore *snapshotv1.VirtualMachineRestore) (restoreTarget, error) {
switch vmRestore.Spec.Target.Kind {
case "VirtualMachine":
vm, err := ctrl.getVM(vmRestore.Namespace, vmRestore.Spec.Target.Name)
if err != nil {
return nil, err
}
return &vmRestoreTarget{
controller: ctrl,
vmRestore: vmRestore,
vm: vm,
}, nil
}
return nil, fmt.Errorf("unknown source %+v", vmRestore.Spec.Target)
}
func (ctrl *VMRestoreController) createRestorePVC(
vmRestore *snapshotv1.VirtualMachineRestore,
target restoreTarget,
volumeBackup *snapshotv1.VolumeBackup,
volumeRestore *snapshotv1.VolumeRestore,
sourceVm *snapshotv1.VirtualMachine,
dvOwner string,
) error {
sourceVmName := sourceVm.Name
sourceVmNamespace := sourceVm.Namespace
if volumeBackup == nil || volumeBackup.VolumeSnapshotName == nil {
log.Log.Errorf("VolumeSnapshot name missing %+v", volumeBackup)
return fmt.Errorf("missing VolumeSnapshot name")
}
if vmRestore == nil {
return fmt.Errorf("missing vmRestore")
}
volumeSnapshot, err := ctrl.VolumeSnapshotProvider.GetVolumeSnapshot(vmRestore.Namespace, *volumeBackup.VolumeSnapshotName)
if err != nil {
return err
}
if volumeSnapshot == nil {
return fmt.Errorf("missing volumeSnapshot %s", *volumeBackup.VolumeSnapshotName)
}
if volumeRestore == nil {
return fmt.Errorf("missing volumeRestore")
}
pvc, err := CreateRestorePVCDefFromVMRestore(vmRestore, volumeRestore.PersistentVolumeClaimName, volumeSnapshot, volumeBackup, sourceVmName, sourceVmNamespace)
if err != nil {
return err
}
if pvc.Annotations == nil {
pvc.Annotations = make(map[string]string)
}
if dvOwner != "" { // PVC is owned by a DV
// By setting this annotation, the CDI will set ownership of the PVC to the DV
pvc.Annotations[populatedForPVCAnnotation] = dvOwner
} else if !isVolumeOwnershipPolicyNone(vmRestore) { // PVC is owned by the VM
if target.Exists() {
target.Own(pvc)
} else if sourcePVCOwnedBySourceVM(volumeBackup, sourceVm) {
pvc.Annotations[restoreOwnedByVMLabel] = vmRestore.Name
}
}
_, err = ctrl.Client.CoreV1().PersistentVolumeClaims(vmRestore.Namespace).Create(context.Background(), pvc, metav1.CreateOptions{})
if err != nil {
return err
}
return nil
}
func sourcePVCOwnedBySourceVM(volumeBackup *snapshotv1.VolumeBackup, sourceVm *snapshotv1.VirtualMachine) bool {
ownerReferences := volumeBackup.PersistentVolumeClaim.OwnerReferences
owned := false
for _, ownerReference := range ownerReferences {
if ownerReference.Kind == "VirtualMachine" && ownerReference.Name == sourceVm.Name && ownerReference.UID == sourceVm.UID {
owned = true
break
}
}
return owned
}
func CreateRestorePVCDef(restorePVCName string, volumeSnapshot *vsv1.VolumeSnapshot, volumeBackup *snapshotv1.VolumeBackup) (*corev1.PersistentVolumeClaim, error) {
if volumeBackup == nil || volumeBackup.VolumeSnapshotName == nil {
return nil, fmt.Errorf("VolumeSnapshot name missing %+v", volumeBackup)
}
apiGroup := vsv1.GroupName
sourcePVC := volumeBackup.PersistentVolumeClaim.DeepCopy()
pvc := &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: restorePVCName,
Labels: sourcePVC.Labels,
Annotations: sourcePVC.Annotations,
},
Spec: corev1.PersistentVolumeClaimSpec{
AccessModes: sourcePVC.Spec.AccessModes,
Resources: sourcePVC.Spec.Resources,
StorageClassName: sourcePVC.Spec.StorageClassName,
VolumeMode: sourcePVC.Spec.VolumeMode,
DataSource: &corev1.TypedLocalObjectReference{
APIGroup: &apiGroup,
Kind: "VolumeSnapshot",
Name: *volumeBackup.VolumeSnapshotName,
},
DataSourceRef: &corev1.TypedObjectReference{
APIGroup: &apiGroup,
Kind: "VolumeSnapshot",
Name: *volumeBackup.VolumeSnapshotName,
},
},
}
if volumeSnapshot == nil {
return nil, fmt.Errorf("VolumeSnapshot missing %+v", volumeSnapshot)
}
if volumeSnapshot.Status != nil && volumeSnapshot.Status.RestoreSize != nil {
restorePVCSize, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
// Update restore pvc size to be the maximum between the source PVC and the restore size
if !ok || restorePVCSize.Cmp(*volumeSnapshot.Status.RestoreSize) < 0 {
pvc.Spec.Resources.Requests[corev1.ResourceStorage] = *volumeSnapshot.Status.RestoreSize
}
}
for _, prefix := range restoreAnnotationsToDelete {
for anno := range pvc.Annotations {
if strings.HasPrefix(anno, prefix) {
delete(pvc.Annotations, anno)
}
}
}
return pvc, nil
}
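// Example (illustrative sketch, hypothetical names): restoring from a
// VolumeSnapshot "vmsnapshot-abc-volume-disk0" produces a PVC whose dataSource
// and dataSourceRef both point back at that snapshot:
//
//	pvc, err := CreateRestorePVCDef("restore-5ccd9b04-disk0", volumeSnapshot, volumeBackup)
//	// pvc.Spec.DataSourceRef.Kind == "VolumeSnapshot"
//	// pvc.Spec.DataSourceRef.Name == "vmsnapshot-abc-volume-disk0"
//	// pvc.Spec.Resources.Requests[corev1.ResourceStorage] is raised to the
//	// snapshot's RestoreSize when that is larger than the source request.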
func getRestoreAnnotationValue(restore *snapshotv1.VirtualMachineRestore) string {
return fmt.Sprintf("%s-%s", restore.Name, restore.UID)
}
func hasLastRestoreAnnotation(restore *snapshotv1.VirtualMachineRestore, obj metav1.Object) bool {
return obj.GetAnnotations()[lastRestoreAnnotation] == getRestoreAnnotationValue(restore)
}
func setLastRestoreAnnotation(restore *snapshotv1.VirtualMachineRestore, obj metav1.Object) {
if obj.GetAnnotations() == nil {
obj.SetAnnotations(make(map[string]string))
}
obj.GetAnnotations()[lastRestoreAnnotation] = getRestoreAnnotationValue(restore)
}
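// Example (illustrative): for a restore named "restore1" with UID "5ccd9b04",
// the annotation value is "restore1-5ccd9b04":
//
//	setLastRestoreAnnotation(restore, vm)
//	hasLastRestoreAnnotation(restore, vm) // true until a different restore stamps the VM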
func getFilteredLabels(labels map[string]string) map[string]string {
excludedKey := backendstorage.PVCPrefix
excludedMap := map[string]struct{}{
excludedKey: {},
}
filteredLabels := make(map[string]string)
for key, value := range labels {
if _, excluded := excludedMap[key]; !excluded {
filteredLabels[key] = value
}
}
return filteredLabels
}
func CreateRestorePVCDefFromVMRestore(vmRestore *snapshotv1.VirtualMachineRestore, restorePVCName string, volumeSnapshot *vsv1.VolumeSnapshot, volumeBackup *snapshotv1.VolumeBackup, sourceVmName, sourceVmNamespace string) (*corev1.PersistentVolumeClaim, error) {
pvc, err := CreateRestorePVCDef(restorePVCName, volumeSnapshot, volumeBackup)
if err != nil {
return nil, err
}
pvc.Labels = getFilteredLabels(pvc.Labels)
if pvc.Annotations == nil {
pvc.Annotations = make(map[string]string)
}
pvc.Labels[restoreSourceNameLabel] = sourceVmName
pvc.Labels[restoreSourceNamespaceLabel] = sourceVmNamespace
pvc.Annotations[RestoreNameAnnotation] = vmRestore.Name
// Mark the PVC with the ID of this restore.
// Used to determine whether the PVC has already been replaced for InPlace restores
setLastRestoreAnnotation(vmRestore, pvc)
if err := applyVolumeRestoreOverride(pvc, volumeBackup, vmRestore.Spec.VolumeRestoreOverrides); err != nil {
return nil, err
}
return pvc, nil
}
func updateRestoreCondition(r *snapshotv1.VirtualMachineRestore, c snapshotv1.Condition) {
r.Status.Conditions = updateCondition(r.Status.Conditions, c)
}
// Returns a set of volumes not for restore
// Currently only memory dump volumes should not be restored
func (ctrl *VMRestoreController) volumesNotForRestore(content *snapshotv1.VirtualMachineSnapshotContent) (sets.String, error) {
noRestore := sets.NewString()
volumes, err := storageutils.GetVolumes(content.Spec.Source.VirtualMachine, ctrl.Client)
if err != nil {
return noRestore, err
}
for _, volume := range volumes {
if volume.MemoryDump != nil {
noRestore.Insert(volume.Name)
}
}
return noRestore, nil
}
func getRestoreVolumeBackup(volName string, content *snapshotv1.VirtualMachineSnapshotContent) (*snapshotv1.VolumeBackup, error) {
for _, vb := range content.Spec.VolumeBackups {
if vb.VolumeName == volName {
return &vb, nil
}
}
return nil, fmt.Errorf("volume backup for volume %s not found", volName)
}
// applyVolumeRestoreOverride applies the VolumeRestoreOverride corresponding to a PVC, if one exists.
// It applies every override except the name change, which has to be handled separately because the name
// is used to track whether the VolumeRestore has happened correctly.
func applyVolumeRestoreOverride(restorePVC *corev1.PersistentVolumeClaim, volumeBackup *snapshotv1.VolumeBackup, overrides []snapshotv1.VolumeRestoreOverride) error {
if overrides == nil {
return nil
}
if restorePVC == nil {
return fmt.Errorf("missing PersistentVolumeClaim when applying VolumeRestoreOverride")
}
if volumeBackup == nil {
return fmt.Errorf("missing VolumeBackup when applying VolumeRestoreOverride")
}
for _, override := range overrides {
// The volume we're trying to restore has a matching override
if override.VolumeName == volumeBackup.VolumeName {
// Override labels/annotations
if restorePVC.Labels != nil && override.Labels != nil {
maps.Copy(restorePVC.Labels, override.Labels)
}
if restorePVC.Annotations != nil && override.Annotations != nil {
maps.Copy(restorePVC.Annotations, override.Annotations)
}
break
}
}
return nil
}
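// Example (illustrative sketch): an override that relabels the restored PVC of
// volume "disk0" without renaming it:
//
//	override := snapshotv1.VolumeRestoreOverride{
//		VolumeName: "disk0",
//		Labels:     map[string]string{"app": "restored"},
//	}
//
// applyVolumeRestoreOverride copies "app": "restored" into restorePVC.Labels.
// Note that labels and annotations are only merged when the restore PVC already
// has a non-nil map for them.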
// isVolumeRestorePolicyInPlace determines if the VolumeRestorePolicy is set to "InPlace"
// If this is the case, we'll have to try to restore the volumes over the original ones, which means
// deleting the original volumes first, if they already exist.
func isVolumeRestorePolicyInPlace(vmRestore *snapshotv1.VirtualMachineRestore) bool {
if vmRestore.Spec.VolumeRestorePolicy == nil {
return false
}
return *vmRestore.Spec.VolumeRestorePolicy == snapshotv1.VolumeRestorePolicyInPlace
}
// prepopulateDataVolume marks a DataVolume as already populated, effectively blocking it
// from creating new PVCs. This function is useful when deleting the PVCs associated with DVs
// during a restore process, as we want to create the new PVCs ourselves and don't want the CDI
// to start reconciliation.
func (ctrl *VMRestoreController) prepopulateDataVolume(namespace, dataVolume, restoreName string) error {
// Mark the DV as being part of a restore
restoreNameAnnotation := fmt.Sprintf("/metadata/annotations/%s", patch.EscapeJSONPointer(RestoreNameAnnotation))
restoreNamePatch := patch.WithAdd(restoreNameAnnotation, restoreName)
// Set the DV as prepopulated so that it doesn't reconcile itself
// As long as the annotation is present (no matter the value), the population process is blocked
prePopulatedAnnotation := fmt.Sprintf("/metadata/annotations/%s", patch.EscapeJSONPointer(cdiv1.AnnPrePopulated))
prePopulatedPatch := patch.WithAdd(prePopulatedAnnotation, dataVolume)
// Craft the patch payload
dvPatch := patch.New(restoreNamePatch, prePopulatedPatch)
patchBytes, err := dvPatch.GeneratePayload()
if err != nil {
return err
}
// Patch the DataVolume
_, err = ctrl.Client.CdiClient().CdiV1beta1().DataVolumes(namespace).Patch(context.Background(), dataVolume, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
return err
}
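// Example (illustrative; the annotation keys are assumptions based on the
// constants' conventional values): for DataVolume "dv-disk0" and restore
// "restore1", the generated payload is a two-operation JSON patch along the
// lines of:
//
//	[
//	 {"op": "add", "path": "/metadata/annotations/restore.kubevirt.io~1restoreName", "value": "restore1"},
//	 {"op": "add", "path": "/metadata/annotations/cdi.kubevirt.io~1storage.prePopulated", "value": "dv-disk0"}
//	]
//
// The "/" in each annotation key is escaped to "~1" per RFC 6901 by
// patch.EscapeJSONPointer.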
// isVolumeOwnershipPolicyNone determines if the VolumeOwnershipPolicy is set to "None"
// If this is the case, the restored volumes will not be owned by the restored VM
func isVolumeOwnershipPolicyNone(vmRestore *snapshotv1.VirtualMachineRestore) bool {
if vmRestore.Spec.VolumeOwnershipPolicy == nil {
return false
}
return *vmRestore.Spec.VolumeOwnershipPolicy == snapshotv1.VolumeOwnershipPolicyNone
}
func setLegacyFirmwareUUID(vm *kubevirtv1.VirtualMachine) {
if vm.Spec.Template.Spec.Domain.Firmware == nil {
vm.Spec.Template.Spec.Domain.Firmware = &kubevirtv1.Firmware{}
}
if vm.Spec.Template.Spec.Domain.Firmware.UUID == "" {
vm.Spec.Template.Spec.Domain.Firmware.UUID = firmware.CalculateLegacyUUID(vm.Name)
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package snapshot
import (
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
kubevirtv1 "kubevirt.io/api/core/v1"
snapshotv1 "kubevirt.io/api/snapshot/v1beta1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
watchutil "kubevirt.io/kubevirt/pkg/virt-controller/watch/util"
)
// VMRestoreController is responsible for restoring VMs
type VMRestoreController struct {
Client kubecli.KubevirtClient
VMRestoreInformer cache.SharedIndexInformer
VMSnapshotInformer cache.SharedIndexInformer
VMSnapshotContentInformer cache.SharedIndexInformer
VMInformer cache.SharedIndexInformer
VMIInformer cache.SharedIndexInformer
DataVolumeInformer cache.SharedIndexInformer
PVCInformer cache.SharedIndexInformer
StorageClassInformer cache.SharedIndexInformer
CRInformer cache.SharedIndexInformer
VolumeSnapshotProvider VolumeSnapshotProvider
Recorder record.EventRecorder
vmRestoreQueue workqueue.TypedRateLimitingInterface[string]
}
// Init initializes the restore controller
func (ctrl *VMRestoreController) Init() error {
ctrl.vmRestoreQueue = workqueue.NewTypedRateLimitingQueueWithConfig[string](
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-controller-restore-vmrestore"},
)
_, err := ctrl.VMRestoreInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: ctrl.handleVMRestore,
UpdateFunc: func(oldObj, newObj interface{}) { ctrl.handleVMRestore(newObj) },
DeleteFunc: ctrl.handleVMRestore,
},
)
if err != nil {
return err
}
_, err = ctrl.DataVolumeInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: ctrl.handleDataVolume,
UpdateFunc: func(oldObj, newObj interface{}) { ctrl.handleDataVolume(newObj) },
},
)
if err != nil {
return err
}
_, err = ctrl.PVCInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: ctrl.handlePVC,
UpdateFunc: func(oldObj, newObj interface{}) { ctrl.handlePVC(newObj) },
},
)
if err != nil {
return err
}
_, err = ctrl.VMInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: ctrl.handleVM,
UpdateFunc: func(oldObj, newObj interface{}) { ctrl.handleVM(newObj) },
},
)
if err != nil {
return err
}
return nil
}
// Run the controller
func (ctrl *VMRestoreController) Run(threadiness int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer ctrl.vmRestoreQueue.ShutDown()
log.Log.Info("Starting restore controller.")
defer log.Log.Info("Shutting down restore controller.")
if !cache.WaitForCacheSync(
stopCh,
ctrl.VMRestoreInformer.HasSynced,
ctrl.VMSnapshotInformer.HasSynced,
ctrl.VMSnapshotContentInformer.HasSynced,
ctrl.VMInformer.HasSynced,
ctrl.VMIInformer.HasSynced,
ctrl.DataVolumeInformer.HasSynced,
ctrl.PVCInformer.HasSynced,
) {
return fmt.Errorf("failed to wait for caches to sync")
}
for i := 0; i < threadiness; i++ {
go wait.Until(ctrl.vmRestoreWorker, time.Second, stopCh)
}
<-stopCh
return nil
}
func (ctrl *VMRestoreController) vmRestoreWorker() {
for ctrl.processVMRestoreWorkItem() {
}
}
func (ctrl *VMRestoreController) processVMRestoreWorkItem() bool {
return watchutil.ProcessWorkItem(ctrl.vmRestoreQueue, func(key string) (time.Duration, error) {
log.Log.V(3).Infof("vmRestore worker processing key [%s]", key)
storeObj, exists, err := ctrl.VMRestoreInformer.GetStore().GetByKey(key)
if !exists || err != nil {
return 0, err
}
vmRestore, ok := storeObj.(*snapshotv1.VirtualMachineRestore)
if !ok {
return 0, fmt.Errorf("unexpected resource %+v", storeObj)
}
return ctrl.updateVMRestore(vmRestore.DeepCopy())
})
}
func (ctrl *VMRestoreController) handleVMRestore(obj interface{}) {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
if vmRestore, ok := obj.(*snapshotv1.VirtualMachineRestore); ok {
objName, err := cache.DeletionHandlingMetaNamespaceKeyFunc(vmRestore)
if err != nil {
log.Log.Errorf("failed to get key from object: %v, %v", err, vmRestore)
return
}
log.Log.V(3).Infof("enqueued %q for sync", objName)
ctrl.vmRestoreQueue.Add(objName)
}
}
func (ctrl *VMRestoreController) handleDataVolume(obj interface{}) {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
if dv, ok := obj.(*cdiv1.DataVolume); ok {
restoreName, ok := dv.Annotations[RestoreNameAnnotation]
if !ok {
return
}
objName := cacheKeyFunc(dv.Namespace, restoreName)
log.Log.V(3).Infof("Handling DV %s/%s, Restore %s", dv.Namespace, dv.Name, objName)
ctrl.vmRestoreQueue.Add(objName)
}
}
func (ctrl *VMRestoreController) handlePVC(obj interface{}) {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
if pvc, ok := obj.(*corev1.PersistentVolumeClaim); ok {
restoreName, ok := pvc.Annotations[RestoreNameAnnotation]
if !ok {
return
}
objName := cacheKeyFunc(pvc.Namespace, restoreName)
log.Log.V(3).Infof("Handling PVC %s/%s, Restore %s", pvc.Namespace, pvc.Name, objName)
ctrl.vmRestoreQueue.Add(objName)
}
}
func (ctrl *VMRestoreController) handleVM(obj interface{}) {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
if vm, ok := obj.(*kubevirtv1.VirtualMachine); ok {
k, _ := cache.MetaNamespaceKeyFunc(vm)
keys, err := ctrl.VMRestoreInformer.GetIndexer().IndexKeys("vm", k)
if err != nil {
utilruntime.HandleError(err)
return
}
for _, k := range keys {
ctrl.vmRestoreQueue.Add(k)
}
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package snapshot
import (
"context"
"fmt"
"strings"
"time"
vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/equality"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
kubevirtv1 "kubevirt.io/api/core/v1"
snapshotv1 "kubevirt.io/api/snapshot/v1beta1"
"kubevirt.io/client-go/log"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/controller"
metrics "kubevirt.io/kubevirt/pkg/monitoring/metrics/virt-controller"
"kubevirt.io/kubevirt/pkg/pointer"
storageutils "kubevirt.io/kubevirt/pkg/storage/utils"
)
const (
vmSnapshotFinalizer = "snapshot.kubevirt.io/vmsnapshot-protection"
vmSnapshotContentFinalizer = "snapshot.kubevirt.io/vmsnapshotcontent-protection"
snapshotSourceNameLabel = "snapshot.kubevirt.io/source-vm-name"
snapshotSourceNamespaceLabel = "snapshot.kubevirt.io/source-vm-namespace"
defaultVolumeSnapshotClassAnnotation = "snapshot.storage.kubernetes.io/is-default-class"
vmSnapshotContentCreateEvent = "SuccessfulVirtualMachineSnapshotContentCreate"
volumeSnapshotCreateEvent = "SuccessfulVolumeSnapshotCreate"
volumeSnapshotMissingEvent = "VolumeSnapshotMissing"
vmSnapshotDeadlineExceededError = "snapshot deadline exceeded"
snapshotRetryInterval = 5 * time.Second
contentDeletionInterval = 5 * time.Second
)
// Indication messages
var snapshotIndicationMessages = map[snapshotv1.Indication]string{
snapshotv1.VMSnapshotOnlineSnapshotIndication: "Snapshot taken while the VM was running. Consistency depends on guest-agent quiescing.",
snapshotv1.VMSnapshotGuestAgentIndication: "Guest agent was active and attempted to quiesce the filesystem for application consistency.",
snapshotv1.VMSnapshotNoGuestAgentIndication: "Guest agent was not available. Snapshot is crash-consistent and may not be application-consistent.",
snapshotv1.VMSnapshotQuiesceFailedIndication: "Guest agent failed to quiesce the filesystem. Snapshot is crash-consistent and may not be application-consistent.",
snapshotv1.VMSnapshotPausedIndication: "Snapshot taken while the VM was paused. Snapshot is crash-consistent and may not be application-consistent.",
}
func VmSnapshotReady(vmSnapshot *snapshotv1.VirtualMachineSnapshot) bool {
return vmSnapshot.Status != nil && vmSnapshot.Status.ReadyToUse != nil && *vmSnapshot.Status.ReadyToUse
}
func vmSnapshotContentCreated(vmSnapshotContent *snapshotv1.VirtualMachineSnapshotContent) bool {
return vmSnapshotContent.Status != nil && vmSnapshotContent.Status.CreationTime != nil
}
func vmSnapshotContentReady(vmSnapshotContent *snapshotv1.VirtualMachineSnapshotContent) bool {
return vmSnapshotContent.Status != nil && vmSnapshotContent.Status.ReadyToUse != nil && *vmSnapshotContent.Status.ReadyToUse
}
func vmSnapshotError(vmSnapshot *snapshotv1.VirtualMachineSnapshot) *snapshotv1.Error {
if vmSnapshot != nil && vmSnapshot.Status != nil && vmSnapshot.Status.Error != nil {
return vmSnapshot.Status.Error
}
return nil
}
func vmSnapshotFailed(vmSnapshot *snapshotv1.VirtualMachineSnapshot) bool {
return vmSnapshot != nil && vmSnapshot.Status != nil && vmSnapshot.Status.Phase == snapshotv1.Failed
}
func vmSnapshotSucceeded(vmSnapshot *snapshotv1.VirtualMachineSnapshot) bool {
return vmSnapshot.Status != nil && vmSnapshot.Status.Phase == snapshotv1.Succeeded
}
func vmSnapshotProgressing(vmSnapshot *snapshotv1.VirtualMachineSnapshot) bool {
return !vmSnapshotFailed(vmSnapshot) && !vmSnapshotSucceeded(vmSnapshot)
}
func deleteContentPolicy(vmSnapshot *snapshotv1.VirtualMachineSnapshot) bool {
return vmSnapshot.Spec.DeletionPolicy == nil ||
*vmSnapshot.Spec.DeletionPolicy == snapshotv1.VirtualMachineSnapshotContentDelete
}
func shouldDeleteContent(vmSnapshot *snapshotv1.VirtualMachineSnapshot, content *snapshotv1.VirtualMachineSnapshotContent) bool {
return deleteContentPolicy(vmSnapshot) || !vmSnapshotContentReady(content)
}
func vmSnapshotContentDeleting(content *snapshotv1.VirtualMachineSnapshotContent) bool {
return content != nil && content.DeletionTimestamp != nil
}
func vmSnapshotDeleting(vmSnapshot *snapshotv1.VirtualMachineSnapshot) bool {
return vmSnapshot != nil && vmSnapshot.DeletionTimestamp != nil
}
func vmSnapshotTerminating(vmSnapshot *snapshotv1.VirtualMachineSnapshot) bool {
return vmSnapshotDeleting(vmSnapshot) || vmSnapshotDeadlineExceeded(vmSnapshot)
}
func contentDeletedIfNeeded(vmSnapshot *snapshotv1.VirtualMachineSnapshot, content *snapshotv1.VirtualMachineSnapshotContent) bool {
return content == nil || !shouldDeleteContent(vmSnapshot, content)
}
// The source can be unlocked either when the snapshot is no longer progressing,
// or when it is terminating (deleted or past its deadline) and the content has
// been deleted if the deletion policy requires it.
func canUnlockSource(vmSnapshot *snapshotv1.VirtualMachineSnapshot, content *snapshotv1.VirtualMachineSnapshotContent) bool {
return !vmSnapshotProgressing(vmSnapshot) ||
(vmSnapshotTerminating(vmSnapshot) && contentDeletedIfNeeded(vmSnapshot, content))
}
func vmSnapshotDeadlineExceeded(vmSnapshot *snapshotv1.VirtualMachineSnapshot) bool {
if vmSnapshotFailed(vmSnapshot) {
return true
}
if !vmSnapshotProgressing(vmSnapshot) {
return false
}
return timeUntilDeadline(vmSnapshot) < 0
}
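// GetVMSnapshotContentName returns the content name recorded in the snapshot
// status, or a deterministic fallback derived from the snapshot UID.
// For example, a snapshot with the (hypothetical) UID "1234-abcd" and an empty
// status resolves to "vmsnapshot-content-1234-abcd".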
func GetVMSnapshotContentName(vmSnapshot *snapshotv1.VirtualMachineSnapshot) string {
if vmSnapshot.Status != nil && vmSnapshot.Status.VirtualMachineSnapshotContentName != nil {
return *vmSnapshot.Status.VirtualMachineSnapshotContentName
}
return fmt.Sprintf("%s-%s", "vmsnapshot-content", vmSnapshot.UID)
}
func translateError(e *vsv1.VolumeSnapshotError) *snapshotv1.Error {
if e == nil {
return nil
}
return &snapshotv1.Error{
Message: e.Message,
Time: e.Time,
}
}
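// updateVMSnapshot reconciles a single VirtualMachineSnapshot: it ensures the
// snapshot finalizer, locks the source and creates the content while the
// snapshot is progressing, deletes the content on termination when the policy
// calls for it, updates the status, and removes the finalizer once the source
// is unlocked. The returned Duration is used as the requeue delay.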
func (ctrl *VMSnapshotController) updateVMSnapshot(vmSnapshot *snapshotv1.VirtualMachineSnapshot) (time.Duration, error) {
log.Log.V(3).Infof("Updating VirtualMachineSnapshot %s/%s", vmSnapshot.Namespace, vmSnapshot.Name)
var retry time.Duration
source, err := ctrl.getSnapshotSource(vmSnapshot)
if err != nil {
return 0, err
}
content, err := ctrl.getContent(vmSnapshot)
if err != nil {
return 0, err
}
terminating := vmSnapshotTerminating(vmSnapshot)
if !terminating {
vmSnapshot, err = ctrl.addSnapshotFinalizer(vmSnapshot)
if err != nil {
return 0, err
}
}
canRemoveFinalizer := true
// Make sure status is initialized before doing anything
if vmSnapshot.Status != nil {
if source != nil {
if vmSnapshotProgressing(vmSnapshot) && !terminating {
// attempt to lock the source;
// if this fails, we will try again when the source is updated
if !source.Locked() {
locked, err := source.Lock()
if err != nil {
return 0, err
}
log.Log.V(3).Infof("Attempt to lock source returned: %t", locked)
retry = snapshotRetryInterval
} else {
// create the content if it does not exist
if content == nil {
if err := ctrl.createContent(vmSnapshot); err != nil {
return 0, err
}
}
}
canRemoveFinalizer = false
} else {
if canUnlockSource(vmSnapshot, content) {
if _, err := source.Unlock(); err != nil {
return 0, err
}
}
canRemoveFinalizer = !source.Locked()
}
}
}
if terminating && content != nil {
// Delete the content if the deletion policy says so, or if the
// snapshot is marked for deletion while the content is not ready yet;
// there is no point in keeping unready content around.
if shouldDeleteContent(vmSnapshot, content) {
log.Log.V(2).Infof("Deleting vmsnapshotcontent %s/%s", content.Namespace, content.Name)
err = ctrl.Client.VirtualMachineSnapshotContent(vmSnapshot.Namespace).Delete(context.Background(), content.Name, metav1.DeleteOptions{})
if err != nil && !k8serrors.IsNotFound(err) {
return 0, err
}
} else {
log.Log.V(2).Infof("NOT deleting vmsnapshotcontent %s/%s", content.Namespace, content.Name)
}
}
vmSnapshot, err = ctrl.updateSnapshotStatus(vmSnapshot, source)
if err != nil {
return 0, err
}
if vmSnapshotDeleting(vmSnapshot) && canRemoveFinalizer {
vmSnapshot, err = ctrl.removeSnapshotFinalizer(vmSnapshot)
if err != nil {
return 0, err
}
}
if retry == 0 {
return timeUntilDeadline(vmSnapshot), nil
}
return retry, nil
}
func (ctrl *VMSnapshotController) unfreezeSource(vmSnapshot *snapshotv1.VirtualMachineSnapshot) error {
if vmSnapshot == nil {
return nil
}
source, err := ctrl.getSnapshotSource(vmSnapshot)
if err != nil {
return err
}
if source != nil {
if err := source.Unfreeze(); err != nil {
return err
}
}
return nil
}
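// generateFinalizerPatch builds a guarded finalizer update: the test operation
// asserts the finalizer list we last observed and the replace operation installs
// the desired one, so a concurrent finalizer change fails the patch instead of
// being clobbered. For hypothetical lists ["a"] -> ["a", "b"] the payload has
// roughly the shape:
//
//	[{"op":"test","path":"/metadata/finalizers","value":["a"]},
//	 {"op":"replace","path":"/metadata/finalizers","value":["a","b"]}]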
func generateFinalizerPatch(test, replace []string) ([]byte, error) {
return patch.New(
patch.WithTest("/metadata/finalizers", test),
patch.WithReplace("/metadata/finalizers", replace),
).GeneratePayload()
}
func (ctrl *VMSnapshotController) addSnapshotFinalizer(snapshot *snapshotv1.VirtualMachineSnapshot) (*snapshotv1.VirtualMachineSnapshot, error) {
if controller.HasFinalizer(snapshot, vmSnapshotFinalizer) {
return snapshot, nil
}
cpy := snapshot.DeepCopy()
controller.AddFinalizer(cpy, vmSnapshotFinalizer)
patch, err := generateFinalizerPatch(snapshot.Finalizers, cpy.Finalizers)
if err != nil {
return snapshot, err
}
return ctrl.Client.VirtualMachineSnapshot(cpy.Namespace).Patch(context.Background(), cpy.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
}
func (ctrl *VMSnapshotController) removeSnapshotFinalizer(snapshot *snapshotv1.VirtualMachineSnapshot) (*snapshotv1.VirtualMachineSnapshot, error) {
if !controller.HasFinalizer(snapshot, vmSnapshotFinalizer) {
return snapshot, nil
}
cpy := snapshot.DeepCopy()
controller.RemoveFinalizer(cpy, vmSnapshotFinalizer)
patch, err := generateFinalizerPatch(snapshot.Finalizers, cpy.Finalizers)
if err != nil {
return snapshot, err
}
return ctrl.Client.VirtualMachineSnapshot(cpy.Namespace).Patch(context.Background(), cpy.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
}
func (ctrl *VMSnapshotController) removeContentFinalizer(content *snapshotv1.VirtualMachineSnapshotContent) (*snapshotv1.VirtualMachineSnapshotContent, error) {
if !controller.HasFinalizer(content, vmSnapshotContentFinalizer) {
return content, nil
}
cpy := content.DeepCopy()
controller.RemoveFinalizer(cpy, vmSnapshotContentFinalizer)
patch, err := generateFinalizerPatch(content.Finalizers, cpy.Finalizers)
if err != nil {
return content, err
}
return ctrl.Client.VirtualMachineSnapshotContent(cpy.Namespace).Patch(context.Background(), cpy.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
}
func (ctrl *VMSnapshotController) updateVMSnapshotContent(content *snapshotv1.VirtualMachineSnapshotContent) (time.Duration, error) {
log.Log.V(3).Infof("Updating VirtualMachineSnapshotContent %s/%s", content.Namespace, content.Name)
var volumeSnapshotStatus []snapshotv1.VolumeSnapshotStatus
var deletedSnapshots, skippedSnapshots []string
var didFreeze bool
vmSnapshot, err := ctrl.getVMSnapshot(content)
if err != nil {
return 0, err
}
if vmSnapshot == nil || vmSnapshotTerminating(vmSnapshot) {
err = ctrl.unfreezeSource(vmSnapshot)
if err != nil {
log.Log.Warningf("Failed to unfreeze source for snapshot content %s/%s: %+v",
content.Namespace, content.Name, err)
}
content, err = ctrl.removeContentFinalizer(content)
if err != nil {
return 0, err
}
if vmSnapshot != nil && shouldDeleteContent(vmSnapshot, content) {
return 0, nil
}
}
if vmSnapshotContentDeleting(content) {
log.Log.V(3).Infof("Content deleting %s/%s", content.Namespace, content.Name)
return contentDeletionInterval, nil
}
contentCpy := content.DeepCopy()
if contentCpy.Status == nil {
contentCpy.Status = &snapshotv1.VirtualMachineSnapshotContentStatus{}
}
contentCreated := vmSnapshotContentCreated(content)
for _, volumeBackup := range content.Spec.VolumeBackups {
if volumeBackup.VolumeSnapshotName == nil {
continue
}
vsName := *volumeBackup.VolumeSnapshotName
volumeSnapshot, err := ctrl.GetVolumeSnapshot(content.Namespace, vsName)
if err != nil {
return 0, err
}
if volumeSnapshot == nil {
// the content was already created, so the VolumeSnapshot must have been deleted
if contentCreated {
log.Log.Warningf("VolumeSnapshot %s no longer exists", vsName)
ctrl.Recorder.Eventf(
content,
corev1.EventTypeWarning,
volumeSnapshotMissingEvent,
"VolumeSnapshot %s no longer exists",
vsName,
)
deletedSnapshots = append(deletedSnapshots, vsName)
continue
}
if vmSnapshot == nil || vmSnapshotDeleting(vmSnapshot) {
log.Log.V(3).Infof("Not creating snapshot %s because vm snapshot is deleted", vsName)
skippedSnapshots = append(skippedSnapshots, vsName)
continue
}
if !didFreeze {
source, err := ctrl.getSnapshotSource(vmSnapshot)
if err != nil {
return 0, err
}
if source == nil {
return 0, fmt.Errorf("unable to get snapshot source")
}
if err := source.Freeze(); err != nil {
contentCpy.Status.Error = &snapshotv1.Error{
Time: currentTime(),
Message: pointer.P(err.Error()),
}
contentCpy.Status.ReadyToUse = pointer.P(false)
// retry after snapshotRetryInterval (5 seconds)
return snapshotRetryInterval, ctrl.updateVmSnapshotContentStatus(content, contentCpy)
}
// We assume the VM is frozen once Freeze() returns, which should be the case.
// If Freeze() were asynchronous, we would have to return here and only
// continue once source.Frozen() reported true.
didFreeze = true
}
volumeSnapshot, err = ctrl.createVolumeSnapshot(content, volumeBackup)
if err != nil {
return 0, err
}
}
vss := snapshotv1.VolumeSnapshotStatus{
VolumeSnapshotName: volumeSnapshot.Name,
}
if volumeSnapshot.Status != nil {
vss.ReadyToUse = volumeSnapshot.Status.ReadyToUse
vss.CreationTime = volumeSnapshot.Status.CreationTime
vss.Error = translateError(volumeSnapshot.Status.Error)
}
volumeSnapshotStatus = append(volumeSnapshotStatus, vss)
}
created, ready := true, true
errorMessage := ""
if len(deletedSnapshots) > 0 {
created, ready = false, false
errorMessage = fmt.Sprintf("VolumeSnapshots (%s) missing", strings.Join(deletedSnapshots, ","))
} else if len(skippedSnapshots) > 0 {
created, ready = false, false
errorMessage = fmt.Sprintf("VolumeSnapshots (%s) skipped because vm snapshot is deleted", strings.Join(skippedSnapshots, ","))
} else {
for _, vss := range volumeSnapshotStatus {
if vss.CreationTime == nil {
created = false
}
if vss.ReadyToUse == nil || !*vss.ReadyToUse {
ready = false
}
if vss.Error != nil && vss.Error.Message != nil {
errorMessage = fmt.Sprintf("VolumeSnapshot %s error: %s", vss.VolumeSnapshotName, *vss.Error.Message)
break
}
}
}
if created && contentCpy.Status.CreationTime == nil {
contentCpy.Status.CreationTime = currentTime()
err = ctrl.unfreezeSource(vmSnapshot)
if err != nil {
return 0, err
}
}
if errorMessage != "" && !ready {
if shouldUpdateError(contentCpy, errorMessage) {
contentCpy.Status.Error = &snapshotv1.Error{
Time: currentTime(),
Message: &errorMessage,
}
}
} else if errorMessage == "" {
contentCpy.Status.Error = nil
}
contentCpy.Status.ReadyToUse = &ready
contentCpy.Status.VolumeSnapshotStatus = volumeSnapshotStatus
return 0, ctrl.updateVmSnapshotContentStatus(content, contentCpy)
}
func shouldUpdateError(contentCpy *snapshotv1.VirtualMachineSnapshotContent, errorMessage string) bool {
return contentCpy.Status.Error == nil || contentCpy.Status.Error.Message == nil || *contentCpy.Status.Error.Message != errorMessage
}
func (ctrl *VMSnapshotController) updateVmSnapshotContentStatus(oldContent, newContent *snapshotv1.VirtualMachineSnapshotContent) error {
if !equality.Semantic.DeepEqual(oldContent.Status, newContent.Status) {
if _, err := ctrl.Client.VirtualMachineSnapshotContent(newContent.Namespace).UpdateStatus(context.Background(), newContent, metav1.UpdateOptions{}); err != nil {
return err
}
}
return nil
}
func (ctrl *VMSnapshotController) createVolumeSnapshot(
content *snapshotv1.VirtualMachineSnapshotContent,
volumeBackup snapshotv1.VolumeBackup,
) (*vsv1.VolumeSnapshot, error) {
log.Log.Infof("Attempting to create VolumeSnapshot %s", *volumeBackup.VolumeSnapshotName)
sc := volumeBackup.PersistentVolumeClaim.Spec.StorageClassName
if sc == nil {
return nil, fmt.Errorf("%s/%s VolumeSnapshot requested but no storage class",
content.Namespace, volumeBackup.PersistentVolumeClaim.Name)
}
volumeSnapshotClass, err := ctrl.getVolumeSnapshotClassName(*sc)
if err != nil || volumeSnapshotClass == "" {
log.Log.Warningf("Couldn't find VolumeSnapshotClass for %s", *sc)
return nil, err
}
snapshot := &vsv1.VolumeSnapshot{
ObjectMeta: metav1.ObjectMeta{
Name: *volumeBackup.VolumeSnapshotName,
Labels: map[string]string{
snapshotSourceNameLabel: content.Spec.Source.VirtualMachine.Name,
snapshotSourceNamespaceLabel: content.Spec.Source.VirtualMachine.Namespace,
},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: snapshotv1.SchemeGroupVersion.String(),
Kind: "VirtualMachineSnapshotContent",
Name: content.Name,
UID: content.UID,
Controller: pointer.P(true),
BlockOwnerDeletion: pointer.P(true),
},
},
},
Spec: vsv1.VolumeSnapshotSpec{
Source: vsv1.VolumeSnapshotSource{
PersistentVolumeClaimName: &volumeBackup.PersistentVolumeClaim.Name,
},
VolumeSnapshotClassName: &volumeSnapshotClass,
},
}
volumeSnapshot, err := ctrl.Client.KubernetesSnapshotClient().SnapshotV1().
VolumeSnapshots(content.Namespace).
Create(context.Background(), snapshot, metav1.CreateOptions{})
if err != nil {
return nil, err
}
ctrl.Recorder.Eventf(
content,
corev1.EventTypeNormal,
volumeSnapshotCreateEvent,
"Successfully created VolumeSnapshot %s",
snapshot.Name,
)
return volumeSnapshot, nil
}
func (ctrl *VMSnapshotController) getSnapshotSource(vmSnapshot *snapshotv1.VirtualMachineSnapshot) (snapshotSource, error) {
switch vmSnapshot.Spec.Source.Kind {
case "VirtualMachine":
vm, err := ctrl.getVM(vmSnapshot)
if err != nil {
return nil, err
}
if vm == nil {
return nil, nil
}
vmSource := &vmSnapshotSource{
vm: vm,
snapshot: vmSnapshot,
controller: ctrl,
}
if err := vmSource.UpdateSourceState(); err != nil {
return nil, err
}
return vmSource, nil
}
return nil, fmt.Errorf("unknown source %+v", vmSnapshot.Spec.Source)
}
func (ctrl *VMSnapshotController) createContent(vmSnapshot *snapshotv1.VirtualMachineSnapshot) error {
source, err := ctrl.getSnapshotSource(vmSnapshot)
if err != nil {
return err
}
var volumeBackups []snapshotv1.VolumeBackup
pvcs, err := source.PersistentVolumeClaims()
if err != nil {
return err
}
for volumeName, pvcName := range pvcs {
pvc, err := ctrl.getSnapshotPVC(vmSnapshot.Namespace, pvcName)
if err != nil {
return err
}
if pvc == nil {
log.Log.Warningf("No snapshot PVC for %s/%s", vmSnapshot.Namespace, pvcName)
continue
}
volumeSnapshotName := fmt.Sprintf("vmsnapshot-%s-volume-%s", vmSnapshot.UID, volumeName)
vb := snapshotv1.VolumeBackup{
VolumeName: volumeName,
PersistentVolumeClaim: snapshotv1.PersistentVolumeClaim{
ObjectMeta: *getSimplifiedMetaObject(pvc.ObjectMeta),
Spec: *pvc.Spec.DeepCopy(),
},
VolumeSnapshotName: &volumeSnapshotName,
}
volumeBackups = append(volumeBackups, vb)
}
sourceSpec, err := source.Spec()
if err != nil {
return err
}
content := &snapshotv1.VirtualMachineSnapshotContent{
ObjectMeta: metav1.ObjectMeta{
Name: GetVMSnapshotContentName(vmSnapshot),
Namespace: vmSnapshot.Namespace,
Finalizers: []string{vmSnapshotContentFinalizer},
},
Spec: snapshotv1.VirtualMachineSnapshotContentSpec{
VirtualMachineSnapshotName: &vmSnapshot.Name,
Source: sourceSpec,
VolumeBackups: volumeBackups,
},
}
_, err = ctrl.Client.VirtualMachineSnapshotContent(content.Namespace).Create(context.Background(), content, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
ctrl.Recorder.Eventf(
vmSnapshot,
corev1.EventTypeNormal,
vmSnapshotContentCreateEvent,
"Successfully created VirtualMachineSnapshotContent %s",
content.Name,
)
return nil
}
func (ctrl *VMSnapshotController) getSnapshotPVC(namespace, volumeName string) (*corev1.PersistentVolumeClaim, error) {
obj, exists, err := ctrl.PVCInformer.GetStore().GetByKey(cacheKeyFunc(namespace, volumeName))
if err != nil {
return nil, err
}
if !exists {
return nil, nil
}
pvc := obj.(*corev1.PersistentVolumeClaim).DeepCopy()
if pvc.Status.Phase != corev1.ClaimBound {
log.Log.Warningf("Unbound PVC %s/%s", pvc.Namespace, pvc.Name)
return nil, nil
}
if pvc.Spec.StorageClassName == nil {
log.Log.Warningf("No storage class for PVC %s/%s", pvc.Namespace, pvc.Name)
return nil, nil
}
volumeSnapshotClass, err := ctrl.getVolumeSnapshotClassName(*pvc.Spec.StorageClassName)
if err != nil {
return nil, err
}
if volumeSnapshotClass != "" {
return pvc, nil
}
log.Log.Warningf("No VolumeSnapshotClass for %s", *pvc.Spec.StorageClassName)
return nil, nil
}
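// getVolumeSnapshotClassName resolves the VolumeSnapshotClass for a
// StorageClass with the following precedence: the snapshot class named by the
// CDI StorageProfile (if it exists); otherwise the single VolumeSnapshotClass
// whose driver matches the StorageClass provisioner; otherwise, among multiple
// matches, the one annotated as the default class. It returns "" without an
// error when nothing matches, and an error when the match is ambiguous.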
func (ctrl *VMSnapshotController) getVolumeSnapshotClassName(storageClassName string) (string, error) {
obj, exists, err := ctrl.StorageClassInformer.GetStore().GetByKey(storageClassName)
if !exists || err != nil {
return "", err
}
storageClass := obj.(*storagev1.StorageClass).DeepCopy()
obj, exists, err = ctrl.StorageProfileInformer.GetStore().GetByKey(storageClassName)
if err != nil {
return "", err
}
if exists {
storageProfile := obj.(*cdiv1.StorageProfile)
defaultSCSnapClass := storageProfile.Status.SnapshotClass
if defaultSCSnapClass != nil && *defaultSCSnapClass != "" {
if vsc, err := ctrl.getVolumeSnapshotClass(*defaultSCSnapClass); err != nil || vsc == nil {
return "", err
}
return *defaultSCSnapClass, nil
}
}
var matches []vsv1.VolumeSnapshotClass
volumeSnapshotClasses := ctrl.getVolumeSnapshotClasses()
for _, volumeSnapshotClass := range volumeSnapshotClasses {
if volumeSnapshotClass.Driver == storageClass.Provisioner {
matches = append(matches, volumeSnapshotClass)
}
}
if len(matches) == 0 {
log.Log.V(3).Infof("No VolumeSnapshotClass for %s", storageClassName)
return "", nil
}
if len(matches) == 1 {
return matches[0].Name, nil
}
for _, volumeSnapshotClass := range matches {
for annotation := range volumeSnapshotClass.Annotations {
if annotation == defaultVolumeSnapshotClassAnnotation {
return volumeSnapshotClass.Name, nil
}
}
}
return "", fmt.Errorf("%d matching VolumeSnapshotClasses for %s", len(matches), storageClassName)
}
func (ctrl *VMSnapshotController) updateSnapshotStatus(vmSnapshot *snapshotv1.VirtualMachineSnapshot, source snapshotSource) (*snapshotv1.VirtualMachineSnapshot, error) {
f := false
vmSnapshotCpy := vmSnapshot.DeepCopy()
if vmSnapshotCpy.Status == nil {
vmSnapshotCpy.Status = &snapshotv1.VirtualMachineSnapshotStatus{
ReadyToUse: &f,
}
}
content, err := ctrl.getContent(vmSnapshot)
if err != nil {
return vmSnapshot, err
}
if source != nil {
uid := source.UID()
vmSnapshotCpy.Status.SourceUID = &uid
}
if content != nil && content.Status != nil {
// content exists and is initialized
vmSnapshotCpy.Status.VirtualMachineSnapshotContentName = &content.Name
vmSnapshotCpy.Status.CreationTime = content.Status.CreationTime
vmSnapshotCpy.Status.ReadyToUse = content.Status.ReadyToUse
vmSnapshotCpy.Status.Error = content.Status.Error
}
// terminal phase 1 - failed
if vmSnapshotDeadlineExceeded(vmSnapshotCpy) {
vmSnapshotCpy.Status.Phase = snapshotv1.Failed
updateSnapshotCondition(vmSnapshotCpy, newFailureCondition(corev1.ConditionTrue, vmSnapshotDeadlineExceededError))
updateSnapshotCondition(vmSnapshotCpy, newProgressingCondition(corev1.ConditionFalse, "Operation failed"))
// terminal phase 2 - succeeded
} else if vmSnapshotSucceeded(vmSnapshotCpy) || vmSnapshotCpy.Status.CreationTime != nil {
vmSnapshotCpy.Status.Phase = snapshotv1.Succeeded
updateSnapshotCondition(vmSnapshotCpy, newProgressingCondition(corev1.ConditionFalse, "Operation complete"))
if err := ctrl.updateSnapshotSnapshotableVolumes(vmSnapshotCpy, content); err != nil {
return nil, err
}
metrics.HandleSucceededVMSnapshot(vmSnapshotCpy)
} else {
vmSnapshotCpy.Status.Phase = snapshotv1.InProgress
if source != nil {
if source.Locked() {
updateSnapshotCondition(vmSnapshotCpy, newProgressingCondition(corev1.ConditionTrue, source.LockMsg()))
} else {
updateSnapshotCondition(vmSnapshotCpy, newProgressingCondition(corev1.ConditionFalse, source.LockMsg()))
}
updateSnapshotSourceIndications(vmSnapshotCpy, source)
} else {
updateSnapshotCondition(vmSnapshotCpy, newProgressingCondition(corev1.ConditionFalse, "Source does not exist"))
}
if vmSnapshotDeleting(vmSnapshotCpy) {
vmSnapshotCpy.Status.Phase = snapshotv1.Deleting
updateSnapshotCondition(vmSnapshotCpy, newProgressingCondition(corev1.ConditionFalse, "VM snapshot is deleting"))
}
}
if VmSnapshotReady(vmSnapshotCpy) {
updateSnapshotCondition(vmSnapshotCpy, newReadyCondition(corev1.ConditionTrue, "Ready"))
} else {
updateSnapshotCondition(vmSnapshotCpy, newReadyCondition(corev1.ConditionFalse, "Not ready"))
}
if vmSnapshotError(vmSnapshotCpy) != nil {
updateSnapshotCondition(vmSnapshotCpy, newProgressingCondition(corev1.ConditionFalse, "In error state"))
}
if !equality.Semantic.DeepEqual(vmSnapshot.Status, vmSnapshotCpy.Status) {
if _, err := ctrl.Client.VirtualMachineSnapshot(vmSnapshotCpy.Namespace).UpdateStatus(context.Background(), vmSnapshotCpy, metav1.UpdateOptions{}); err != nil {
return nil, err
}
return vmSnapshotCpy, nil
}
return vmSnapshot, nil
}
// IndicationMessage returns a human-readable message for the given indication.
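// For example (value taken from the snapshotIndicationMessages table above):
//
//	IndicationMessage(snapshotv1.VMSnapshotNoGuestAgentIndication)
//	// "Guest agent was not available. Snapshot is crash-consistent and may not be application-consistent."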
func IndicationMessage(indication snapshotv1.Indication) string {
if message, ok := snapshotIndicationMessages[indication]; ok {
return message
}
return "Unknown indication"
}
// updateSnapshotSourceIndications updates both the old and new indication fields
func updateSnapshotSourceIndications(snapshot *snapshotv1.VirtualMachineSnapshot, source snapshotSource) {
if source.Online() {
indications := sets.New(snapshot.Status.Indications...)
indications = sets.Insert(indications, snapshotv1.VMSnapshotOnlineSnapshotIndication)
if source.Paused() {
indications = sets.Insert(indications, snapshotv1.VMSnapshotPausedIndication)
} else if source.GuestAgent() {
indications = sets.Insert(indications, snapshotv1.VMSnapshotGuestAgentIndication)
snapErr := snapshot.Status.Error
if snapErr != nil && snapErr.Message != nil &&
strings.Contains(*snapErr.Message, failedFreezeMsg) {
indications = sets.Insert(indications, snapshotv1.VMSnapshotQuiesceFailedIndication)
}
} else {
indications = sets.Insert(indications, snapshotv1.VMSnapshotNoGuestAgentIndication)
}
indicationsList := sets.List(indications)
// Update the old field for backward compatibility
snapshot.Status.Indications = indicationsList
// Update the new sourceIndications field
var sourceIndications []snapshotv1.SourceIndication
for _, indication := range indicationsList {
sourceIndications = append(sourceIndications, snapshotv1.SourceIndication{
Indication: indication,
Message: IndicationMessage(indication),
})
}
snapshot.Status.SourceIndications = sourceIndications
} else {
// For offline snapshots, no indications are needed
snapshot.Status.Indications = nil
snapshot.Status.SourceIndications = nil
}
}
func (ctrl *VMSnapshotController) updateSnapshotSnapshotableVolumes(snapshot *snapshotv1.VirtualMachineSnapshot, content *snapshotv1.VirtualMachineSnapshotContent) error {
if content == nil {
return nil
}
vm := content.Spec.Source.VirtualMachine
if vm == nil || vm.Spec.Template == nil {
return nil
}
volumes, err := storageutils.GetVolumes(vm, ctrl.Client, storageutils.WithAllVolumes)
if err != nil && !storageutils.IsErrNoBackendPVC(err) {
return err
}
volumeBackups := make(map[string]bool)
for _, volumeBackup := range content.Spec.VolumeBackups {
volumeBackups[volumeBackup.VolumeName] = true
}
var excludedVolumes []string
var includedVolumes []string
for _, volume := range volumes {
if _, ok := volumeBackups[volume.Name]; ok {
includedVolumes = append(includedVolumes, volume.Name)
} else {
excludedVolumes = append(excludedVolumes, volume.Name)
}
}
snapshot.Status.SnapshotVolumes = &snapshotv1.SnapshotVolumesLists{
IncludedVolumes: includedVolumes,
ExcludedVolumes: excludedVolumes,
}
return nil
}
func (ctrl *VMSnapshotController) updateVolumeSnapshotStatuses(vm *kubevirtv1.VirtualMachine) error {
log.Log.V(3).Infof("Update volume snapshot status for VM [%s/%s]", vm.Namespace, vm.Name)
vmCopy := vm.DeepCopy()
volumes, err := storageutils.GetVolumes(vmCopy, ctrl.Client, storageutils.WithAllVolumes)
if err != nil && !storageutils.IsErrNoBackendPVC(err) {
return err
}
var statuses []kubevirtv1.VolumeSnapshotStatus
for _, volume := range volumes {
log.Log.V(3).Infof("Update volume snapshot status for volume [%s]", volume.Name)
status := ctrl.getVolumeSnapshotStatus(vmCopy, &volume)
statuses = append(statuses, status)
}
vmCopy.Status.VolumeSnapshotStatuses = statuses
if equality.Semantic.DeepEqual(vmCopy.Status.VolumeSnapshotStatuses, vm.Status.VolumeSnapshotStatuses) {
return nil
}
_, err = ctrl.Client.VirtualMachine(vmCopy.Namespace).UpdateStatus(context.Background(), vmCopy, metav1.UpdateOptions{})
return err
}
func (ctrl *VMSnapshotController) getVolumeSnapshotStatus(vm *kubevirtv1.VirtualMachine, volume *kubevirtv1.Volume) kubevirtv1.VolumeSnapshotStatus {
snapshottable := ctrl.isVolumeSnapshottable(volume)
if !snapshottable {
return kubevirtv1.VolumeSnapshotStatus{
Name: volume.Name,
Enabled: false,
Reason: fmt.Sprintf("Snapshot is not supported for this volumeSource type [%s]", volume.Name),
}
}
sc, err := ctrl.getVolumeStorageClass(vm.Namespace, volume)
if err != nil {
return kubevirtv1.VolumeSnapshotStatus{Name: volume.Name, Enabled: false, Reason: err.Error()}
}
snap, err := ctrl.getVolumeSnapshotClassName(sc)
if err != nil {
return kubevirtv1.VolumeSnapshotStatus{Name: volume.Name, Enabled: false, Reason: err.Error()}
}
if snap == "" {
return kubevirtv1.VolumeSnapshotStatus{
Name: volume.Name,
Enabled: false,
Reason: fmt.Sprintf("No VolumeSnapshotClass: Volume snapshots are not configured for this StorageClass [%s] [%s]", sc, volume.Name),
}
}
return kubevirtv1.VolumeSnapshotStatus{Name: volume.Name, Enabled: true}
}
func (ctrl *VMSnapshotController) isVolumeSnapshottable(volume *kubevirtv1.Volume) bool {
return volume.VolumeSource.PersistentVolumeClaim != nil ||
volume.VolumeSource.DataVolume != nil ||
volume.VolumeSource.MemoryDump != nil
}
func (ctrl *VMSnapshotController) getStorageClassNameForPVC(pvcKey string) (string, error) {
obj, exists, err := ctrl.PVCInformer.GetStore().GetByKey(pvcKey)
if err != nil {
return "", err
}
if !exists {
log.Log.V(3).Infof("PVC not in cache [%s]", pvcKey)
return "", fmt.Errorf("PVC not found")
}
pvc := obj.(*corev1.PersistentVolumeClaim)
if pvc.Spec.StorageClassName != nil {
return *pvc.Spec.StorageClassName, nil
}
return "", nil
}
func (ctrl *VMSnapshotController) getVolumeStorageClass(namespace string, volume *kubevirtv1.Volume) (string, error) {
// TODO Add Ephemeral (add "|| volume.VolumeSource.Ephemeral != nil" to the `if` below)
if volume.VolumeSource.PersistentVolumeClaim != nil {
pvcKey := cacheKeyFunc(namespace, volume.VolumeSource.PersistentVolumeClaim.ClaimName)
storageClassName, err := ctrl.getStorageClassNameForPVC(pvcKey)
if err != nil {
return "", err
}
return storageClassName, nil
}
if volume.VolumeSource.MemoryDump != nil {
pvcKey := cacheKeyFunc(namespace, volume.VolumeSource.MemoryDump.ClaimName)
storageClassName, err := ctrl.getStorageClassNameForPVC(pvcKey)
if err != nil {
return "", err
}
return storageClassName, nil
}
if volume.VolumeSource.DataVolume != nil {
storageClassName, err := ctrl.getStorageClassNameForDV(namespace, volume.VolumeSource.DataVolume.Name)
if err != nil {
return "", err
}
return storageClassName, nil
}
return "", fmt.Errorf("volume type has no StorageClass defined")
}
func (ctrl *VMSnapshotController) getStorageClassNameForDV(namespace string, dvName string) (string, error) {
// First, look up DV's StorageClass
key := cacheKeyFunc(namespace, dvName)
obj, exists, err := ctrl.DVInformer.GetStore().GetByKey(key)
if err != nil {
return "", err
}
if !exists {
log.Log.V(3).Infof("DV is not in cache [%s]", key)
return ctrl.getStorageClassNameForPVC(key)
}
dv := obj.(*cdiv1.DataVolume)
if dv.Spec.PVC != nil && dv.Spec.PVC.StorageClassName != nil && *dv.Spec.PVC.StorageClassName != "" {
return *dv.Spec.PVC.StorageClassName, nil
}
// Second, see if DV is owned by a VM, and if so, if the DVTemplate has a StorageClass
for _, or := range dv.OwnerReferences {
if or.Kind == "VirtualMachine" {
vmKey := cacheKeyFunc(namespace, or.Name)
storeObj, exists, err := ctrl.VMInformer.GetStore().GetByKey(vmKey)
if err != nil || !exists {
continue
}
vm, ok := storeObj.(*kubevirtv1.VirtualMachine)
if !ok {
continue
}
for _, dvTemplate := range vm.Spec.DataVolumeTemplates {
if dvTemplate.Name == dvName && dvTemplate.Spec.PVC != nil && dvTemplate.Spec.PVC.StorageClassName != nil {
return *dvTemplate.Spec.PVC.StorageClassName, nil
}
}
}
}
// Third, if everything else fails, fall back to the PVC and read its StorageClass.
// NOTE: until the PVC exists, this may yield a possibly incorrect `false`
// value for the volume snapshot status.
return ctrl.getStorageClassNameForPVC(key)
}
func (ctrl *VMSnapshotController) getVM(vmSnapshot *snapshotv1.VirtualMachineSnapshot) (*kubevirtv1.VirtualMachine, error) {
vmName := vmSnapshot.Spec.Source.Name
obj, exists, err := ctrl.VMInformer.GetStore().GetByKey(cacheKeyFunc(vmSnapshot.Namespace, vmName))
if err != nil {
return nil, err
}
if !exists {
return nil, nil
}
return obj.(*kubevirtv1.VirtualMachine).DeepCopy(), nil
}
func (ctrl *VMSnapshotController) getContent(vmSnapshot *snapshotv1.VirtualMachineSnapshot) (*snapshotv1.VirtualMachineSnapshotContent, error) {
contentName := GetVMSnapshotContentName(vmSnapshot)
obj, exists, err := ctrl.VMSnapshotContentInformer.GetStore().GetByKey(cacheKeyFunc(vmSnapshot.Namespace, contentName))
if err != nil {
return nil, err
}
if !exists {
return nil, nil
}
return obj.(*snapshotv1.VirtualMachineSnapshotContent).DeepCopy(), nil
}
func (ctrl *VMSnapshotController) getVMSnapshot(vmSnapshotContent *snapshotv1.VirtualMachineSnapshotContent) (*snapshotv1.VirtualMachineSnapshot, error) {
vmSnapshotName := vmSnapshotContent.Spec.VirtualMachineSnapshotName
if vmSnapshotName == nil {
return nil, fmt.Errorf("VirtualMachineSnapshotName is not initialized in vm snapshot content")
}
obj, exists, err := ctrl.VMSnapshotInformer.GetStore().GetByKey(cacheKeyFunc(vmSnapshotContent.Namespace, *vmSnapshotName))
if err != nil || !exists {
return nil, err
}
return obj.(*snapshotv1.VirtualMachineSnapshot).DeepCopy(), nil
}
func (ctrl *VMSnapshotController) getVMI(vm *kubevirtv1.VirtualMachine) (*kubevirtv1.VirtualMachineInstance, bool, error) {
key, err := controller.KeyFunc(vm)
if err != nil {
return nil, false, err
}
obj, exists, err := ctrl.VMIInformer.GetStore().GetByKey(key)
if err != nil || !exists {
return nil, exists, err
}
return obj.(*kubevirtv1.VirtualMachineInstance).DeepCopy(), true, nil
}
func updateSnapshotCondition(ss *snapshotv1.VirtualMachineSnapshot, c snapshotv1.Condition) {
ss.Status.Conditions = updateCondition(ss.Status.Conditions, c)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package snapshot
import (
"fmt"
"sync"
"time"
vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
corev1 "k8s.io/api/core/v1"
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
kubevirtv1 "kubevirt.io/api/core/v1"
snapshotv1 "kubevirt.io/api/snapshot/v1beta1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
"kubevirt.io/kubevirt/pkg/controller"
watchutil "kubevirt.io/kubevirt/pkg/virt-controller/watch/util"
)
const (
unexpectedResourceFmt = "unexpected resource %+v"
failedKeyFromObjectFmt = "failed to get key from object: %v, %v"
enqueuedForSyncFmt = "enqueued %q for sync"
)
const (
volumeSnapshotCRD = "volumesnapshots.snapshot.storage.k8s.io"
volumeSnapshotClassCRD = "volumesnapshotclasses.snapshot.storage.k8s.io"
)
type informerFunc func(kubecli.KubevirtClient, time.Duration) cache.SharedIndexInformer
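// dynamicInformer lazily manages an informer for a CRD that may not exist at
// startup: ensureDynamicInformer starts it once the CRD is served, and
// deleteDynamicInformer stops it when the CRD goes away. The mutex guards the
// informer and its stop channel.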
type dynamicInformer struct {
stopCh chan struct{}
informer cache.SharedIndexInformer
mutex sync.Mutex
informerFunc informerFunc
}
// VMSnapshotController is responsible for snapshotting VMs
type VMSnapshotController struct {
Client kubecli.KubevirtClient
VMSnapshotInformer cache.SharedIndexInformer
VMSnapshotContentInformer cache.SharedIndexInformer
VMInformer cache.SharedIndexInformer
VMIInformer cache.SharedIndexInformer
StorageClassInformer cache.SharedIndexInformer
StorageProfileInformer cache.SharedIndexInformer
PVCInformer cache.SharedIndexInformer
CRDInformer cache.SharedIndexInformer
PodInformer cache.SharedIndexInformer
DVInformer cache.SharedIndexInformer
CRInformer cache.SharedIndexInformer
Recorder record.EventRecorder
ResyncPeriod time.Duration
vmSnapshotQueue workqueue.TypedRateLimitingInterface[string]
vmSnapshotContentQueue workqueue.TypedRateLimitingInterface[string]
crdQueue workqueue.TypedRateLimitingInterface[string]
vmSnapshotStatusQueue workqueue.TypedRateLimitingInterface[string]
vmQueue workqueue.TypedRateLimitingInterface[string]
dynamicInformerMap map[string]*dynamicInformer
eventHandlerMap map[string]cache.ResourceEventHandlerFuncs
}
var supportedCRDVersions = []string{"v1"}
// Init initializes the snapshot controller
func (ctrl *VMSnapshotController) Init() error {
ctrl.vmSnapshotQueue = workqueue.NewTypedRateLimitingQueueWithConfig[string](
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-controller-snapshot-vmsnapshot"},
)
ctrl.vmSnapshotContentQueue = workqueue.NewTypedRateLimitingQueueWithConfig[string](
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-controller-snapshot-vmsnapshotcontent"},
)
ctrl.crdQueue = workqueue.NewTypedRateLimitingQueueWithConfig[string](
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-controller-snapshot-crd"},
)
ctrl.vmSnapshotStatusQueue = workqueue.NewTypedRateLimitingQueueWithConfig[string](
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-controller-snapshot-vmsnashotstatus"},
)
ctrl.vmQueue = workqueue.NewTypedRateLimitingQueueWithConfig[string](
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-controller-snapshot-vm"},
)
ctrl.dynamicInformerMap = map[string]*dynamicInformer{
volumeSnapshotCRD: {informerFunc: controller.VolumeSnapshotInformer},
volumeSnapshotClassCRD: {informerFunc: controller.VolumeSnapshotClassInformer},
}
ctrl.eventHandlerMap = map[string]cache.ResourceEventHandlerFuncs{
volumeSnapshotCRD: {
AddFunc: ctrl.handleVolumeSnapshot,
UpdateFunc: func(oldObj, newObj interface{}) { ctrl.handleVolumeSnapshot(newObj) },
DeleteFunc: ctrl.handleVolumeSnapshot,
},
volumeSnapshotClassCRD: {
AddFunc: ctrl.handleVolumeSnapshotClass,
UpdateFunc: func(oldObj, newObj interface{}) { ctrl.handleVolumeSnapshotClass(newObj) },
DeleteFunc: ctrl.handleVolumeSnapshotClass,
},
}
_, err := ctrl.VMSnapshotInformer.AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: ctrl.handleVMSnapshot,
UpdateFunc: func(oldObj, newObj interface{}) { ctrl.handleVMSnapshot(newObj) },
},
ctrl.ResyncPeriod,
)
if err != nil {
return err
}
_, err = ctrl.VMSnapshotContentInformer.AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: ctrl.handleVMSnapshotContent,
UpdateFunc: func(oldObj, newObj interface{}) { ctrl.handleVMSnapshotContent(newObj) },
DeleteFunc: ctrl.handleVMSnapshotContent,
},
ctrl.ResyncPeriod,
)
if err != nil {
return err
}
_, err = ctrl.VMInformer.AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: ctrl.handleVM,
UpdateFunc: func(oldObj, newObj interface{}) { ctrl.handleVM(newObj) },
DeleteFunc: ctrl.handleVM,
},
ctrl.ResyncPeriod,
)
if err != nil {
return err
}
_, err = ctrl.VMIInformer.AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: ctrl.handleVMI,
UpdateFunc: func(oldObj, newObj interface{}) { ctrl.handleVMI(newObj) },
DeleteFunc: ctrl.handleVMI,
},
ctrl.ResyncPeriod,
)
if err != nil {
return err
}
_, err = ctrl.CRDInformer.AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: ctrl.handleCRD,
UpdateFunc: func(oldObj, newObj interface{}) { ctrl.handleCRD(newObj) },
DeleteFunc: ctrl.handleCRD,
},
ctrl.ResyncPeriod,
)
if err != nil {
return err
}
_, err = ctrl.DVInformer.AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: ctrl.handleDV,
UpdateFunc: func(oldObj, newObj interface{}) { ctrl.handleDV(newObj) },
DeleteFunc: ctrl.handleDV,
},
ctrl.ResyncPeriod,
)
if err != nil {
return err
}
_, err = ctrl.PVCInformer.AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: ctrl.handlePVC,
UpdateFunc: func(oldObj, newObj interface{}) { ctrl.handlePVC(newObj) },
DeleteFunc: ctrl.handlePVC,
},
ctrl.ResyncPeriod,
)
if err != nil {
return err
}
return nil
}
// Run the controller
func (ctrl *VMSnapshotController) Run(threadiness int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer ctrl.vmSnapshotQueue.ShutDown()
defer ctrl.vmSnapshotContentQueue.ShutDown()
defer ctrl.crdQueue.ShutDown()
defer ctrl.vmSnapshotStatusQueue.ShutDown()
defer ctrl.vmQueue.ShutDown()
log.Log.Info("Starting snapshot controller.")
defer log.Log.Info("Shutting down snapshot controller.")
if !cache.WaitForCacheSync(
stopCh,
ctrl.VMSnapshotInformer.HasSynced,
ctrl.VMSnapshotContentInformer.HasSynced,
ctrl.VMInformer.HasSynced,
ctrl.VMIInformer.HasSynced,
ctrl.CRDInformer.HasSynced,
ctrl.PodInformer.HasSynced,
ctrl.PVCInformer.HasSynced,
ctrl.DVInformer.HasSynced,
ctrl.StorageClassInformer.HasSynced,
ctrl.StorageProfileInformer.HasSynced,
) {
return fmt.Errorf("failed to wait for caches to sync")
}
for i := 0; i < threadiness; i++ {
go wait.Until(ctrl.crdWorker, time.Second, stopCh)
}
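// Drain the CRD queue before starting the snapshot workers so that the
// dynamic VolumeSnapshot/VolumeSnapshotClass informers are in place by the
// time snapshot processing needs them.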
log.Log.Infof("CRD queue length: %d", ctrl.crdQueue.Len())
for ql := ctrl.crdQueue.Len(); ql > 0; ql = ctrl.crdQueue.Len() {
log.Log.Infof("Waiting for empty CRD queue, currently: %d", ql)
time.Sleep(2 * time.Second)
}
for i := 0; i < threadiness; i++ {
go wait.Until(ctrl.vmSnapshotWorker, time.Second, stopCh)
go wait.Until(ctrl.vmSnapshotContentWorker, time.Second, stopCh)
go wait.Until(ctrl.vmSnapshotStatusWorker, time.Second, stopCh)
go wait.Until(ctrl.vmWorker, time.Second, stopCh)
}
<-stopCh
for crd := range ctrl.dynamicInformerMap {
if _, err := ctrl.deleteDynamicInformer(crd); err != nil {
log.Log.Warningf("failed to delete %s informer: %v", crd, err)
}
}
return nil
}
func (ctrl *VMSnapshotController) vmSnapshotWorker() {
for ctrl.processVMSnapshotWorkItem() {
}
}
func (ctrl *VMSnapshotController) vmSnapshotContentWorker() {
for ctrl.processVMSnapshotContentWorkItem() {
}
}
func (ctrl *VMSnapshotController) crdWorker() {
for ctrl.processCRDWorkItem() {
}
}
func (ctrl *VMSnapshotController) vmSnapshotStatusWorker() {
for ctrl.processVMSnapshotStatusWorkItem() {
}
}
func (ctrl *VMSnapshotController) vmWorker() {
for ctrl.processVMWorkItem() {
}
}
func (ctrl *VMSnapshotController) processVMSnapshotWorkItem() bool {
return watchutil.ProcessWorkItem(ctrl.vmSnapshotQueue, func(key string) (time.Duration, error) {
log.Log.V(3).Infof("vmSnapshot worker processing key [%s]", key)
storeObj, exists, err := ctrl.VMSnapshotInformer.GetStore().GetByKey(key)
if !exists || err != nil {
return 0, err
}
vmSnapshot, ok := storeObj.(*snapshotv1.VirtualMachineSnapshot)
if !ok {
return 0, fmt.Errorf(unexpectedResourceFmt, storeObj)
}
return ctrl.updateVMSnapshot(vmSnapshot.DeepCopy())
})
}
func (ctrl *VMSnapshotController) processVMSnapshotContentWorkItem() bool {
return watchutil.ProcessWorkItem(ctrl.vmSnapshotContentQueue, func(key string) (time.Duration, error) {
log.Log.V(3).Infof("vmSnapshotContent worker processing key [%s]", key)
storeObj, exists, err := ctrl.VMSnapshotContentInformer.GetStore().GetByKey(key)
if !exists || err != nil {
return 0, err
}
vmSnapshotContent, ok := storeObj.(*snapshotv1.VirtualMachineSnapshotContent)
if !ok {
return 0, fmt.Errorf(unexpectedResourceFmt, storeObj)
}
return ctrl.updateVMSnapshotContent(vmSnapshotContent.DeepCopy())
})
}
func (ctrl *VMSnapshotController) processCRDWorkItem() bool {
return watchutil.ProcessWorkItem(ctrl.crdQueue, func(key string) (time.Duration, error) {
log.Log.V(3).Infof("CRD worker processing key [%s]", key)
storeObj, exists, err := ctrl.CRDInformer.GetStore().GetByKey(key)
if err != nil {
return 0, err
}
if !exists {
_, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return 0, err
}
return ctrl.deleteDynamicInformer(name)
}
crd, ok := storeObj.(*extv1.CustomResourceDefinition)
if !ok {
return 0, fmt.Errorf(unexpectedResourceFmt, storeObj)
}
if crd.DeletionTimestamp != nil {
return ctrl.deleteDynamicInformer(crd.Name)
}
return ctrl.ensureDynamicInformer(crd.Name)
})
}
func (ctrl *VMSnapshotController) processVMSnapshotStatusWorkItem() bool {
return watchutil.ProcessWorkItem(ctrl.vmSnapshotStatusQueue, func(key string) (time.Duration, error) {
log.Log.V(3).Infof("vmSnapshotStatus worker processing VM [%s]", key)
storeObj, exists, err := ctrl.VMInformer.GetStore().GetByKey(key)
if err != nil {
return 0, err
}
if exists {
vm, ok := storeObj.(*kubevirtv1.VirtualMachine)
if !ok {
return 0, fmt.Errorf(unexpectedResourceFmt, storeObj)
}
if err = ctrl.updateVolumeSnapshotStatuses(vm); err != nil {
return 0, err
}
}
return 0, nil
})
}
func (ctrl *VMSnapshotController) processVMWorkItem() bool {
return watchutil.ProcessWorkItem(ctrl.vmQueue, func(key string) (time.Duration, error) {
log.Log.V(3).Infof("vm worker processing VM [%s]", key)
storeObj, exists, err := ctrl.VMInformer.GetStore().GetByKey(key)
if err != nil {
return 0, err
}
if exists {
vm, ok := storeObj.(*kubevirtv1.VirtualMachine)
if !ok {
return 0, fmt.Errorf(unexpectedResourceFmt, storeObj)
}
ctrl.handleVM(vm)
}
return 0, nil
})
}
func (ctrl *VMSnapshotController) handleVMSnapshot(obj interface{}) {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
if vmSnapshot, ok := obj.(*snapshotv1.VirtualMachineSnapshot); ok {
objName, err := cache.DeletionHandlingMetaNamespaceKeyFunc(vmSnapshot)
if err != nil {
log.Log.Errorf(failedKeyFromObjectFmt, err, vmSnapshot)
return
}
log.Log.V(3).Infof(enqueuedForSyncFmt, objName)
ctrl.vmSnapshotQueue.Add(objName)
}
}
func (ctrl *VMSnapshotController) handleVMSnapshotContent(obj interface{}) {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
if content, ok := obj.(*snapshotv1.VirtualMachineSnapshotContent); ok {
objName, err := cache.DeletionHandlingMetaNamespaceKeyFunc(content)
if err != nil {
log.Log.Errorf(failedKeyFromObjectFmt, err, content)
return
}
if content.Spec.VirtualMachineSnapshotName != nil {
k := cacheKeyFunc(content.Namespace, *content.Spec.VirtualMachineSnapshotName)
log.Log.V(5).Infof("enqueued vmsnapshot %q for sync", k)
ctrl.vmSnapshotQueue.Add(k)
}
log.Log.V(5).Infof(enqueuedForSyncFmt, objName)
ctrl.vmSnapshotContentQueue.Add(objName)
}
}
func (ctrl *VMSnapshotController) handleVM(obj interface{}) {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
if vm, ok := obj.(*kubevirtv1.VirtualMachine); ok {
k, _ := cache.MetaNamespaceKeyFunc(vm)
keys, err := ctrl.VMSnapshotInformer.GetIndexer().IndexKeys("vm", k)
if err != nil {
utilruntime.HandleError(err)
return
}
for _, k := range keys {
ctrl.vmSnapshotQueue.Add(k)
}
key, err := controller.KeyFunc(vm)
if err != nil {
log.Log.Error("Failed to extract vmKey from VirtualMachine.")
} else {
ctrl.vmSnapshotStatusQueue.Add(key)
}
}
}
func (ctrl *VMSnapshotController) handleVMI(obj interface{}) {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
if vmi, ok := obj.(*kubevirtv1.VirtualMachineInstance); ok {
k, _ := cache.MetaNamespaceKeyFunc(vmi)
keys, err := ctrl.VMSnapshotInformer.GetIndexer().IndexKeys("vm", k)
if err != nil {
utilruntime.HandleError(err)
return
}
for _, k := range keys {
ctrl.vmSnapshotQueue.Add(k)
}
}
}
func (ctrl *VMSnapshotController) handleVolumeSnapshotClass(obj interface{}) {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
if _, ok := obj.(*vsv1.VolumeSnapshotClass); ok {
for _, vmKey := range ctrl.VMInformer.GetStore().ListKeys() {
ctrl.vmQueue.Add(vmKey)
}
}
}
func (ctrl *VMSnapshotController) handleCRD(obj interface{}) {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
if crd, ok := obj.(*extv1.CustomResourceDefinition); ok {
_, ok = ctrl.dynamicInformerMap[crd.Name]
if ok {
hasSupportedVersion := false
for _, crdVersion := range crd.Spec.Versions {
for _, supportedVersion := range supportedCRDVersions {
if crdVersion.Name == supportedVersion && crdVersion.Served {
hasSupportedVersion = true
}
}
}
if !hasSupportedVersion {
return
}
objName, err := cache.DeletionHandlingMetaNamespaceKeyFunc(crd)
if err != nil {
log.Log.Errorf(failedKeyFromObjectFmt, err, crd)
return
}
log.Log.V(3).Infof(enqueuedForSyncFmt, objName)
ctrl.crdQueue.Add(objName)
}
}
}
func (ctrl *VMSnapshotController) handleVolumeSnapshot(obj interface{}) {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
if volumeSnapshot, ok := obj.(*vsv1.VolumeSnapshot); ok {
k, _ := cache.MetaNamespaceKeyFunc(volumeSnapshot)
keys, err := ctrl.VMSnapshotContentInformer.GetIndexer().IndexKeys("volumeSnapshot", k)
if err != nil {
utilruntime.HandleError(err)
return
}
for _, k := range keys {
ctrl.vmSnapshotContentQueue.Add(k)
}
}
}
func (ctrl *VMSnapshotController) handleDV(obj interface{}) {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
if dv, ok := obj.(*cdiv1.DataVolume); ok {
key, _ := cache.MetaNamespaceKeyFunc(dv)
log.Log.V(3).Infof("Processing DV %s", key)
for _, idx := range []string{"dv", "pvc"} {
keys, err := ctrl.VMInformer.GetIndexer().IndexKeys(idx, key)
if err != nil {
utilruntime.HandleError(err)
return
}
for _, k := range keys {
ctrl.vmSnapshotStatusQueue.Add(k)
}
}
}
}
func (ctrl *VMSnapshotController) handlePVC(obj interface{}) {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
if pvc, ok := obj.(*corev1.PersistentVolumeClaim); ok {
key, _ := cache.MetaNamespaceKeyFunc(pvc)
log.Log.V(3).Infof("Processing PVC %s", key)
keys, err := ctrl.VMInformer.GetIndexer().IndexKeys("pvc", key)
if err != nil {
utilruntime.HandleError(err)
return
}
for _, k := range keys {
ctrl.vmSnapshotStatusQueue.Add(k)
}
}
}
func (ctrl *VMSnapshotController) getVolumeSnapshotClass(vscName string) (*vsv1.VolumeSnapshotClass, error) {
di := ctrl.dynamicInformerMap[volumeSnapshotClassCRD]
di.mutex.Lock()
defer di.mutex.Unlock()
if di.informer == nil {
return nil, nil
}
obj, exists, err := di.informer.GetStore().GetByKey(vscName)
if !exists || err != nil {
return nil, err
}
return obj.(*vsv1.VolumeSnapshotClass).DeepCopy(), nil
}
func (ctrl *VMSnapshotController) getVolumeSnapshotClasses() []vsv1.VolumeSnapshotClass {
di := ctrl.dynamicInformerMap[volumeSnapshotClassCRD]
di.mutex.Lock()
defer di.mutex.Unlock()
if di.informer == nil {
return nil
}
var vscs []vsv1.VolumeSnapshotClass
objs := di.informer.GetStore().List()
for _, obj := range objs {
vsc := obj.(*vsv1.VolumeSnapshotClass).DeepCopy()
vscs = append(vscs, *vsc)
}
return vscs
}
func (ctrl *VMSnapshotController) ensureDynamicInformer(name string) (time.Duration, error) {
di, ok := ctrl.dynamicInformerMap[name]
if !ok {
return 0, fmt.Errorf("unexpected CRD %s", name)
}
di.mutex.Lock()
defer di.mutex.Unlock()
if di.informer != nil {
return 0, nil
}
di.stopCh = make(chan struct{})
di.informer = di.informerFunc(ctrl.Client, ctrl.ResyncPeriod)
handlerFuncs, ok := ctrl.eventHandlerMap[name]
if ok {
di.informer.AddEventHandlerWithResyncPeriod(handlerFuncs, ctrl.ResyncPeriod)
}
go di.informer.Run(di.stopCh)
cache.WaitForCacheSync(di.stopCh, di.informer.HasSynced)
log.Log.Infof("Successfully created informer for %q", name)
return 0, nil
}
func (ctrl *VMSnapshotController) deleteDynamicInformer(name string) (time.Duration, error) {
di, ok := ctrl.dynamicInformerMap[name]
if !ok {
return 0, fmt.Errorf("unexpected CRD %s", name)
}
di.mutex.Lock()
defer di.mutex.Unlock()
if di.informer == nil {
return 0, nil
}
close(di.stopCh)
di.stopCh = nil
di.informer = nil
log.Log.Infof("Successfully deleted informer for %q", name)
return 0, nil
}
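// VolumeSnapshotProvider is a narrow, read-only view over VolumeSnapshots.
// VMSnapshotController satisfies it with GetVolumeSnapshot below, backed by
// the dynamic VolumeSnapshot informer; nil is returned when the informer
// (i.e. the CRD) is absent.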
type VolumeSnapshotProvider interface {
GetVolumeSnapshot(string, string) (*vsv1.VolumeSnapshot, error)
}
func (ctrl *VMSnapshotController) GetVolumeSnapshot(namespace, name string) (*vsv1.VolumeSnapshot, error) {
di := ctrl.dynamicInformerMap[volumeSnapshotCRD]
di.mutex.Lock()
defer di.mutex.Unlock()
if di.informer == nil {
return nil, nil
}
key := fmt.Sprintf("%s/%s", namespace, name)
obj, exists, err := di.informer.GetStore().GetByKey(key)
if !exists || err != nil {
return nil, err
}
return obj.(*vsv1.VolumeSnapshot).DeepCopy(), nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package snapshot
import (
"context"
"encoding/json"
"errors"
"fmt"
"maps"
"slices"
"strings"
"time"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
kubevirtv1 "kubevirt.io/api/core/v1"
snapshotv1 "kubevirt.io/api/snapshot/v1beta1"
"kubevirt.io/client-go/log"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
"kubevirt.io/kubevirt/pkg/controller"
storagetypes "kubevirt.io/kubevirt/pkg/storage/types"
storageutils "kubevirt.io/kubevirt/pkg/storage/utils"
utils "kubevirt.io/kubevirt/pkg/util"
watchutil "kubevirt.io/kubevirt/pkg/virt-controller/watch/util"
launcherapi "kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
)
const (
sourceFinalizer = "snapshot.kubevirt.io/snapshot-source-protection"
failedFreezeMsg = "Failed freezing vm"
)
var (
ErrVolumeDoesntExist = errors.New("volume doesn't exist")
ErrVolumeNotBound = errors.New("volume not bound")
ErrVolumeNotPopulated = errors.New("volume not populated")
)
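// snapshotSource abstracts the entity being snapshotted so the controller can
// lock and unlock it, freeze and unfreeze its filesystems, and enumerate the
// PVCs backing its volumes. vmSnapshotSource below is the VirtualMachine
// implementation.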
type snapshotSource interface {
UpdateSourceState() error
UID() types.UID
Locked() bool
LockMsg() string
Lock() (bool, error)
Unlock() (bool, error)
Online() bool
Paused() bool
GuestAgent() bool
Frozen() bool
Freeze() error
Unfreeze() error
Spec() (snapshotv1.SourceSpec, error)
PersistentVolumeClaims() (map[string]string, error)
}
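// Illustrative sketch (editorial addition): a reconcile pass typically drives
// a snapshotSource in roughly this order, for a hypothetical `src`:
//
//	_ = src.UpdateSourceState()   // refresh online/paused/guest-agent/frozen flags
//	if ok, _ := src.Lock(); ok {  // set SnapshotInProgress and the source finalizer
//		_ = src.Freeze()          // quiesce guest filesystems when possible
//		spec, _ := src.Spec()     // capture the VM definition for the content object
//		_ = spec
//		_ = src.Unfreeze()
//		_, _ = src.Unlock()
//	}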
type sourceState struct {
online bool
paused bool
guestAgent bool
frozen bool
locked bool
lockMsg string
}
type vmSnapshotSource struct {
vm *kubevirtv1.VirtualMachine
snapshot *snapshotv1.VirtualMachineSnapshot
controller *VMSnapshotController
state *sourceState
}
func (s *vmSnapshotSource) UpdateSourceState() error {
vmi, exists, err := s.controller.getVMI(s.vm)
if err != nil {
return err
}
online := exists
condManager := controller.NewVirtualMachineInstanceConditionManager()
paused := exists && condManager.HasConditionWithStatus(vmi, kubevirtv1.VirtualMachineInstancePaused, corev1.ConditionTrue)
guestAgent := exists && condManager.HasCondition(vmi, kubevirtv1.VirtualMachineInstanceAgentConnected)
locked := s.vm.Status.SnapshotInProgress != nil &&
*s.vm.Status.SnapshotInProgress == s.snapshot.Name &&
controller.HasFinalizer(s.vm, sourceFinalizer)
lockMsg := "Source not locked"
if locked {
lockMsg = "Source locked and operation in progress"
}
frozen := exists && vmi.Status.FSFreezeStatus == launcherapi.FSFrozen
s.state = &sourceState{
online: online,
paused: paused,
guestAgent: guestAgent,
locked: locked,
frozen: frozen,
lockMsg: lockMsg,
}
return nil
}
func (s *vmSnapshotSource) UID() types.UID {
return s.vm.UID
}
func (s *vmSnapshotSource) Locked() bool {
return s.state.locked
}
func (s *vmSnapshotSource) LockMsg() string {
return s.state.lockMsg
}
func (s *vmSnapshotSource) Lock() (bool, error) {
if s.Locked() {
return true, nil
}
pvcNames, err := s.pvcNames()
if err != nil {
if storageutils.IsErrNoBackendPVC(err) {
// No backend PVC when we should have one; let's wait
// TODO: Improve this error handling
return false, nil
}
return false, err
}
err = s.verifyVolumes(pvcNames.List())
if err != nil {
switch errors.Unwrap(err) {
case ErrVolumeDoesntExist, ErrVolumeNotBound, ErrVolumeNotPopulated:
s.state.lockMsg += fmt.Sprintf(" source %s/%s %s", s.vm.Namespace, s.vm.Name, err.Error())
log.Log.Error(s.state.lockMsg)
return false, nil
default:
return false, err
}
}
if !s.Online() {
pods, err := watchutil.PodsUsingPVCs(s.controller.PodInformer, s.vm.Namespace, pvcNames)
if err != nil {
return false, err
}
if len(pods) > 0 {
s.state.lockMsg += fmt.Sprintf(" source is offline but %d pods using PVCs %+v", len(pods), slices.Collect(maps.Keys(pvcNames)))
log.Log.V(3).Info(s.state.lockMsg)
return false, nil
}
}
if s.vm.Status.SnapshotInProgress != nil && *s.vm.Status.SnapshotInProgress != s.snapshot.Name {
s.state.lockMsg += fmt.Sprintf(" snapshot %q in progress", *s.vm.Status.SnapshotInProgress)
log.Log.V(3).Info(s.state.lockMsg)
return false, nil
}
vmCopy := s.vm.DeepCopy()
if vmCopy.Status.SnapshotInProgress == nil {
vmCopy.Status.SnapshotInProgress = &s.snapshot.Name
vmCopy, err = s.controller.Client.VirtualMachine(vmCopy.Namespace).UpdateStatus(context.Background(), vmCopy, metav1.UpdateOptions{})
if err != nil {
return false, err
}
}
if !controller.HasFinalizer(vmCopy, sourceFinalizer) {
log.Log.Infof("Adding VM snapshot finalizer to %s", s.vm.Name)
controller.AddFinalizer(vmCopy, sourceFinalizer)
patch, err := generateFinalizerPatch(s.vm.Finalizers, vmCopy.Finalizers)
if err != nil {
return false, err
}
vmCopy, err = s.controller.Client.VirtualMachine(vmCopy.Namespace).Patch(context.Background(), vmCopy.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
if err != nil {
return false, err
}
}
s.vm = vmCopy
s.state.locked = true
s.state.lockMsg = "Source locked and operation in progress"
return true, nil
}
func (s *vmSnapshotSource) Unlock() (bool, error) {
if s.vm.Status.SnapshotInProgress == nil || *s.vm.Status.SnapshotInProgress != s.snapshot.Name {
return false, nil
}
var err error
vmCopy := s.vm.DeepCopy()
if controller.HasFinalizer(vmCopy, sourceFinalizer) {
controller.RemoveFinalizer(vmCopy, sourceFinalizer)
patch, err := generateFinalizerPatch(s.vm.Finalizers, vmCopy.Finalizers)
if err != nil {
return false, err
}
vmCopy, err = s.controller.Client.VirtualMachine(vmCopy.Namespace).Patch(context.Background(), vmCopy.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
if err != nil {
return false, err
}
}
vmCopy.Status.SnapshotInProgress = nil
vmCopy, err = s.controller.Client.VirtualMachine(vmCopy.Namespace).UpdateStatus(context.Background(), vmCopy, metav1.UpdateOptions{})
if err != nil {
return false, err
}
s.vm = vmCopy
return true, nil
}
func (s *vmSnapshotSource) verifyVolumes(pvcNames []string) error {
for _, pvcName := range pvcNames {
obj, exists, err := s.controller.PVCInformer.GetStore().GetByKey(cacheKeyFunc(s.vm.Namespace, pvcName))
if err != nil {
return err
}
if !exists {
return fmt.Errorf("%w: %s", ErrVolumeDoesntExist, pvcName)
}
pvc := obj.(*corev1.PersistentVolumeClaim).DeepCopy()
if pvc.Status.Phase != corev1.ClaimBound {
return fmt.Errorf("%w: %s", ErrVolumeNotBound, pvcName)
}
getDVFunc := func(name, namespace string) (*cdiv1.DataVolume, error) {
dv, err := storagetypes.GetDataVolumeFromCache(namespace, name, s.controller.DVInformer.GetStore())
if err != nil {
return nil, err
}
if dv == nil {
return nil, fmt.Errorf("Data volume %s/%s doesnt exist", namespace, name)
}
return dv, err
}
if populated, err := cdiv1.IsPopulated(pvc, getDVFunc); !populated || err != nil {
if err != nil {
return err
}
return fmt.Errorf("%w: %s", ErrVolumeNotPopulated, pvcName)
}
}
return nil
}
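// Editorial note: verifyVolumes wraps its sentinel errors with %w, so callers
// can match them with a single errors.Unwrap (as Lock does above) or, more
// robustly across extra wrapping layers, with errors.Is:
//
//	if err := s.verifyVolumes(names); errors.Is(err, ErrVolumeNotBound) {
//		// wait for the PVC to bind and retry
//	}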
func (s *vmSnapshotSource) getVMRevision() (*snapshotv1.VirtualMachine, error) {
vmi, exists, err := s.controller.getVMI(s.vm)
if err != nil {
return nil, err
}
if !exists {
return nil, fmt.Errorf("can't get vm revision, vmi doesn't exist")
}
crName := vmi.Status.VirtualMachineRevisionName
storeObj, exists, err := s.controller.CRInformer.GetStore().GetByKey(cacheKeyFunc(vmi.Namespace, crName))
if err != nil {
return nil, err
}
if !exists {
return nil, fmt.Errorf("vm revision %s doesn't exist", crName)
}
cr, ok := storeObj.(*appsv1.ControllerRevision)
if !ok {
return nil, fmt.Errorf("unexpected resource %+v", storeObj)
}
vmRevision := &snapshotv1.VirtualMachine{}
err = json.Unmarshal(cr.Data.Raw, vmRevision)
if err != nil {
return nil, err
}
return vmRevision, nil
}
func (s *vmSnapshotSource) getControllerRevision(namespace, name string) (*appsv1.ControllerRevision, error) {
crKey := cacheKeyFunc(namespace, name)
obj, exists, err := s.controller.CRInformer.GetStore().GetByKey(crKey)
if err != nil {
return nil, err
}
if !exists {
return nil, fmt.Errorf("Unable to fetch ControllerRevision %s", crKey)
}
cr, ok := obj.(*appsv1.ControllerRevision)
if !ok {
return nil, fmt.Errorf("Unexpected object format returned by informer")
}
return cr, nil
}
func (s *vmSnapshotSource) captureInstancetypeControllerRevision(namespace, revisionName string) (string, error) {
existingCR, err := s.getControllerRevision(namespace, revisionName)
if err != nil {
return "", err
}
snapshotCR := existingCR.DeepCopy()
snapshotCR.ObjectMeta.Reset()
snapshotCR.ObjectMeta.SetLabels(existingCR.Labels)
// We strip out the source VM name from the CR name and replace it with the snapshot name
snapshotCR.Name = strings.Replace(existingCR.Name, s.snapshot.Spec.Source.Name, s.snapshot.Name, 1)
// Ensure GVK is set before we attempt to create the controller OwnerReference below
obj, err := utils.GenerateKubeVirtGroupVersionKind(s.snapshot)
if err != nil {
return "", err
}
snapshot, ok := obj.(*snapshotv1.VirtualMachineSnapshot)
if !ok {
return "", fmt.Errorf("Unexpected object format returned from GenerateKubeVirtGroupVersionKind")
}
snapshotCR.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(snapshot, snapshot.GroupVersionKind())}
snapshotCR, err = s.controller.Client.AppsV1().ControllerRevisions(s.snapshot.Namespace).Create(context.Background(), snapshotCR, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return "", err
}
return snapshotCR.Name, nil
}
func (s *vmSnapshotSource) captureInstancetypeControllerRevisions(vm *snapshotv1.VirtualMachine) error {
if vm.Spec.Instancetype != nil && vm.Spec.Instancetype.RevisionName != "" {
snapshotCRName, err := s.captureInstancetypeControllerRevision(vm.Namespace, vm.Spec.Instancetype.RevisionName)
if err != nil {
return err
}
vm.Spec.Instancetype.RevisionName = snapshotCRName
}
if vm.Spec.Preference != nil && vm.Spec.Preference.RevisionName != "" {
snapshotCRName, err := s.captureInstancetypeControllerRevision(vm.Namespace, vm.Spec.Preference.RevisionName)
if err != nil {
return err
}
vm.Spec.Preference.RevisionName = snapshotCRName
}
return nil
}
func (s *vmSnapshotSource) Spec() (snapshotv1.SourceSpec, error) {
vmCpy := &snapshotv1.VirtualMachine{}
metaObj := *getSimplifiedMetaObject(s.vm.ObjectMeta)
if s.Online() {
var err error
vmCpy, err = s.getVMRevision()
if err != nil {
return snapshotv1.SourceSpec{}, err
}
vmCpy.ObjectMeta = metaObj
vmCpy.Spec.Template.Spec.Volumes = s.vm.Spec.Template.Spec.Volumes
vmCpy.Spec.Template.Spec.Domain.Devices.Disks = s.vm.Spec.Template.Spec.Domain.Devices.Disks
vmCpy.Spec.DataVolumeTemplates = s.vm.Spec.DataVolumeTemplates
} else {
vmCpy.ObjectMeta = metaObj
vmCpy.Spec = *s.vm.Spec.DeepCopy()
vmCpy.Status = kubevirtv1.VirtualMachineStatus{}
}
if err := s.captureInstancetypeControllerRevisions(vmCpy); err != nil {
return snapshotv1.SourceSpec{}, err
}
return snapshotv1.SourceSpec{
VirtualMachine: vmCpy,
}, nil
}
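// Editorial note: for a running VM the captured spec is based on the VMI's
// ControllerRevision (the definition the guest actually booted with), while
// volumes, disks and DataVolumeTemplates are taken from the current VM so
// that hotplugged volumes are included; for a stopped VM the live spec is
// copied verbatim and the status is cleared.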
func (s *vmSnapshotSource) Online() bool {
return s.state.online
}
func (s *vmSnapshotSource) Paused() bool {
return s.state.paused
}
func (s *vmSnapshotSource) GuestAgent() bool {
return s.state.guestAgent
}
func (s *vmSnapshotSource) Frozen() bool {
return s.state.frozen
}
func (s *vmSnapshotSource) Freeze() error {
if !s.Locked() {
return fmt.Errorf("attempting to freeze unlocked VM")
}
if s.Frozen() {
return nil
}
if s.Paused() {
log.Log.Warningf("VM %s is paused - taking snapshot without filesystem freeze. Paused VMs cannot flush memory buffers to disk, which may result in inconsistent snapshots.", s.vm.Name)
return nil
}
if !s.GuestAgent() {
if s.Online() {
log.Log.Warningf("Guest agent does not exist and VM %s is running. Snapshoting without freezing FS. This can result in inconsistent snapshot!", s.vm.Name)
}
return nil
}
log.Log.V(3).Infof("Freezing vm %s file system before taking the snapshot", s.vm.Name)
startTime := time.Now()
err := s.controller.Client.VirtualMachineInstance(s.vm.Namespace).Freeze(context.Background(), s.vm.Name, getFailureDeadline(s.snapshot))
timeTrack(startTime, fmt.Sprintf("Freezing vmi %s", s.vm.Name))
if err != nil {
formattedErr := fmt.Errorf("%s %s: %v", failedFreezeMsg, s.vm.Name, err)
log.Log.Errorf("%s", formattedErr.Error())
return formattedErr
}
s.state.frozen = true
return nil
}
func (s *vmSnapshotSource) Unfreeze() error {
if !s.Locked() || !s.GuestAgent() || s.Paused() {
return nil
}
log.Log.V(3).Infof("Unfreezing vm %s file system after taking the snapshot", s.vm.Name)
defer timeTrack(time.Now(), fmt.Sprintf("Unfreezing vmi %s", s.vm.Name))
err := s.controller.Client.VirtualMachineInstance(s.vm.Namespace).Unfreeze(context.Background(), s.vm.Name)
if err != nil {
return err
}
s.state.frozen = false
return nil
}
func (s *vmSnapshotSource) PersistentVolumeClaims() (map[string]string, error) {
volumes, err := storageutils.GetVolumes(s.vm, s.controller.Client, storageutils.WithAllVolumes)
if err != nil {
return map[string]string{}, err
}
return storagetypes.GetPVCsFromVolumes(volumes), nil
}
func (s *vmSnapshotSource) pvcNames() (sets.String, error) {
ss := sets.NewString()
pvcs, err := s.PersistentVolumeClaims()
if err != nil {
return ss, err
}
for _, pvc := range pvcs {
ss.Insert(pvc)
}
return ss, nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package snapshot
import (
"context"
"fmt"
"time"
"kubevirt.io/client-go/kubecli"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
snapshotv1 "kubevirt.io/api/snapshot/v1beta1"
"kubevirt.io/client-go/log"
)
// currentTime is a variable so it can be overridden in tests
var currentTime = func() *metav1.Time {
t := metav1.Now()
return &t
}
func timeTrack(start time.Time, name string) {
elapsed := time.Since(start)
log.Log.Infof("%s took %s", name, elapsed)
}
func cacheKeyFunc(namespace, name string) string {
return fmt.Sprintf("%s/%s", namespace, name)
}
func newReadyCondition(status corev1.ConditionStatus, reason string) snapshotv1.Condition {
return snapshotv1.Condition{
Type: snapshotv1.ConditionReady,
Status: status,
Reason: reason,
LastTransitionTime: *currentTime(),
}
}
func newProgressingCondition(status corev1.ConditionStatus, reason string) snapshotv1.Condition {
return snapshotv1.Condition{
Type: snapshotv1.ConditionProgressing,
Status: status,
Reason: reason,
LastTransitionTime: *currentTime(),
}
}
func newFailureCondition(status corev1.ConditionStatus, reason string) snapshotv1.Condition {
return snapshotv1.Condition{
Type: snapshotv1.ConditionFailure,
Status: status,
Reason: reason,
LastTransitionTime: *currentTime(),
}
}
func hasConditionType(conditions []snapshotv1.Condition, condType snapshotv1.ConditionType) bool {
for _, cond := range conditions {
if cond.Type == condType {
return true
}
}
return false
}
func updateCondition(conditions []snapshotv1.Condition, c snapshotv1.Condition) []snapshotv1.Condition {
found := false
for i := range conditions {
if conditions[i].Type == c.Type {
if conditions[i].Status != c.Status || conditions[i].Reason != c.Reason {
conditions[i] = c
}
found = true
break
}
}
if !found {
conditions = append(conditions, c)
}
return conditions
}
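// Illustrative sketch (editorial addition): updateCondition rewrites an
// existing condition of the same type in place (only when status or reason
// changed) and appends otherwise:
//
//	conds := []snapshotv1.Condition{}
//	conds = updateCondition(conds, newProgressingCondition(corev1.ConditionTrue, "In progress"))
//	conds = updateCondition(conds, newProgressingCondition(corev1.ConditionFalse, "Complete"))
//	// len(conds) == 1: the Progressing condition was updated, not duplicated.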
func getFailureDeadline(vmSnapshot *snapshotv1.VirtualMachineSnapshot) time.Duration {
failureDeadline := snapshotv1.DefaultFailureDeadline
if vmSnapshot.Spec.FailureDeadline != nil {
failureDeadline = vmSnapshot.Spec.FailureDeadline.Duration
}
return failureDeadline
}
func timeUntilDeadline(vmSnapshot *snapshotv1.VirtualMachineSnapshot) time.Duration {
failureDeadline := getFailureDeadline(vmSnapshot)
// No Deadline set by user
if failureDeadline == 0 {
return failureDeadline
}
deadline := vmSnapshot.CreationTimestamp.Add(failureDeadline)
return time.Until(deadline)
}
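// Worked example (editorial addition, hypothetical values): with a 5m failure
// deadline and a snapshot created at 10:00:00, timeUntilDeadline returns
// roughly 2m at 10:03:00 and a negative duration after 10:05:00, signaling
// that the snapshot should be failed. A zero FailureDeadline disables the
// deadline entirely, which is why 0 is returned unchanged above.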
func getSimplifiedMetaObject(meta metav1.ObjectMeta) *metav1.ObjectMeta {
result := meta.DeepCopy()
result.ManagedFields = nil
return result
}
func GetSnapshotContents(vmSnapshot *snapshotv1.VirtualMachineSnapshot, client kubecli.KubevirtClient) (*snapshotv1.VirtualMachineSnapshotContent, error) {
if vmSnapshot == nil {
return nil, fmt.Errorf("VirtualMachineSnapshot is nil")
}
if vmSnapshot.Status == nil || vmSnapshot.Status.VirtualMachineSnapshotContentName == nil {
return nil, fmt.Errorf("VirtualMachineSnapshot %s has nil contents name", vmSnapshot.Name)
}
vmSnapshotContentName := *vmSnapshot.Status.VirtualMachineSnapshotContentName
return client.VirtualMachineSnapshotContent(vmSnapshot.Namespace).Get(context.Background(), vmSnapshotContentName, metav1.GetOptions{})
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package types
import (
"errors"
"fmt"
"math"
"strconv"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
virtv1 "kubevirt.io/api/core/v1"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)
const (
ConfigName = "config"
DefaultFSOverhead = virtv1.Percent("0.055")
FSOverheadMsg = "Using default 5.5%% filesystem overhead for pvc size"
)
var ErrFailedToFindCdi = errors.New("no CDI instances found")
var ErrMultipleCdiInstances = errors.New("detected more than one CDI instance")
func GetFilesystemOverhead(volumeMode *k8sv1.PersistentVolumeMode, storageClass *string, cdiConfig *cdiv1.CDIConfig) (virtv1.Percent, error) {
if IsPVCBlock(volumeMode) {
return "0", nil
}
if cdiConfig.Status.FilesystemOverhead == nil {
return "0", errors.New("CDI config not initialized")
}
if storageClass == nil {
return virtv1.Percent(cdiConfig.Status.FilesystemOverhead.Global), nil
}
fsOverhead, ok := cdiConfig.Status.FilesystemOverhead.StorageClass[*storageClass]
if !ok {
return virtv1.Percent(cdiConfig.Status.FilesystemOverhead.Global), nil
}
return virtv1.Percent(fsOverhead), nil
}
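// Illustrative sketch (editorial addition): the overhead resolution order is
// block mode first, then a per-storage-class override, then the global value:
//
//	blockMode := k8sv1.PersistentVolumeBlock
//	sc := "fast-ssd" // hypothetical storage class
//	_, _ = GetFilesystemOverhead(&blockMode, &sc, cdiConfig) // "0": block volumes need no overhead
//	_, _ = GetFilesystemOverhead(nil, &sc, cdiConfig)        // per-class value, or the global fallback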
func roundUpToUnit(size, unit float64) float64 {
if size < unit {
return unit
}
return math.Ceil(size/unit) * unit
}
func alignSizeUpTo1MiB(size float64) float64 {
return roundUpToUnit(size, float64(MiB))
}
func GetSizeIncludingGivenOverhead(size *resource.Quantity, overhead virtv1.Percent) (*resource.Quantity, error) {
fsOverhead, err := strconv.ParseFloat(string(overhead), 64)
if err != nil {
return nil, fmt.Errorf("failed to parse filesystem overhead as float: %v", err)
}
totalSize := (1 + fsOverhead) * size.AsApproximateFloat64()
totalSize = alignSizeUpTo1MiB(totalSize)
return resource.NewQuantity(int64(totalSize), size.Format), nil
}
func GetSizeIncludingDefaultFSOverhead(size *resource.Quantity) (*resource.Quantity, error) {
return GetSizeIncludingGivenOverhead(size, DefaultFSOverhead)
}
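// Worked example (editorial addition): for a 10Gi request with the default
// 0.055 overhead, totalSize = 10Gi * 1.055 ≈ 11327976243 bytes, which is then
// aligned up to the next 1MiB boundary (10804MiB = 11329863680 bytes):
//
//	size := resource.MustParse("10Gi")
//	total, _ := GetSizeIncludingDefaultFSOverhead(&size)
//	_ = total // ~10.55Gi, MiB-aligned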
func GetSizeIncludingFSOverhead(size *resource.Quantity, storageClass *string, volumeMode *k8sv1.PersistentVolumeMode, cdiConfig *cdiv1.CDIConfig) (*resource.Quantity, error) {
cdiFSOverhead, err := GetFilesystemOverhead(volumeMode, storageClass, cdiConfig)
if err != nil {
return nil, err
}
return GetSizeIncludingGivenOverhead(size, cdiFSOverhead)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package types
import (
"context"
"errors"
"fmt"
"maps"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
"kubevirt.io/kubevirt/pkg/controller"
)
type CloneSource struct {
Namespace string
Name string
}
func DataVolumeWFFC(dv *cdiv1.DataVolume) bool {
return dv.Status.Phase == cdiv1.WaitForFirstConsumer
}
func HasWFFCDataVolumes(dvs []*cdiv1.DataVolume) bool {
for _, dv := range dvs {
if DataVolumeWFFC(dv) {
return true
}
}
return false
}
func DataVolumeFailed(dv *cdiv1.DataVolume) bool {
return dv.Status.Phase == cdiv1.Failed
}
func HasFailedDataVolumes(dvs []*cdiv1.DataVolume) bool {
for _, dv := range dvs {
if DataVolumeFailed(dv) {
return true
}
}
return false
}
// GetResolvedCloneSource resolves the clone source of a datavolume with sourceRef
// This will be moved to the CDI API package
func GetResolvedCloneSource(ctx context.Context, client kubecli.KubevirtClient, namespace string, dvSpec *cdiv1.DataVolumeSpec) (*cdiv1.DataVolumeSource, error) {
ns := namespace
source := dvSpec.Source
if dvSpec.SourceRef != nil && dvSpec.SourceRef.Kind == "DataSource" {
if dvSpec.SourceRef.Namespace != nil {
ns = *dvSpec.SourceRef.Namespace
}
ds, err := client.CdiClient().CdiV1beta1().DataSources(ns).Get(ctx, dvSpec.SourceRef.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
resolvedSource := ds.Spec.Source.DeepCopy()
if ds.Spec.Source.DataSource != nil {
resolvedSource = ds.Status.Source.DeepCopy()
}
source = &cdiv1.DataVolumeSource{
PVC: resolvedSource.PVC,
Snapshot: resolvedSource.Snapshot,
}
}
if source == nil {
return source, nil
}
switch {
case source.PVC != nil:
if source.PVC.Namespace == "" {
source.PVC.Namespace = ns
}
case source.Snapshot != nil:
if source.Snapshot.Namespace == "" {
source.Snapshot.Namespace = ns
}
default:
source = nil
}
return source, nil
}
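// Illustrative sketch (editorial addition): a DataVolume referencing a
// DataSource is resolved into a concrete PVC or Snapshot source, with empty
// source namespaces defaulted as described above:
//
//	spec := &cdiv1.DataVolumeSpec{
//		SourceRef: &cdiv1.DataVolumeSourceRef{Kind: "DataSource", Name: "golden-image"},
//	}
//	src, _ := GetResolvedCloneSource(ctx, client, "vm-namespace", spec)
//	// src.PVC or src.Snapshot is populated from the DataSource; a missing
//	// namespace falls back to the DataSource namespace or "vm-namespace".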
func GenerateDataVolumeFromTemplate(clientset kubecli.KubevirtClient, dataVolumeTemplate virtv1.DataVolumeTemplateSpec, namespace, priorityClassName string) (*cdiv1.DataVolume, error) {
newDataVolume := &cdiv1.DataVolume{}
newDataVolume.Spec = *dataVolumeTemplate.Spec.DeepCopy()
newDataVolume.ObjectMeta = *dataVolumeTemplate.ObjectMeta.DeepCopy()
newDataVolume.ObjectMeta.Labels = maps.Clone(dataVolumeTemplate.Labels)
if newDataVolume.ObjectMeta.Labels == nil {
newDataVolume.ObjectMeta.Labels = make(map[string]string)
}
newDataVolume.ObjectMeta.Annotations = maps.Clone(dataVolumeTemplate.Annotations)
if newDataVolume.ObjectMeta.Annotations == nil {
newDataVolume.ObjectMeta.Annotations = make(map[string]string, 1)
}
newDataVolume.ObjectMeta.Annotations[allowClaimAdoptionAnnotation] = "true"
if newDataVolume.Spec.PriorityClassName == "" && priorityClassName != "" {
newDataVolume.Spec.PriorityClassName = priorityClassName
}
dvSource, err := GetResolvedCloneSource(context.TODO(), clientset, namespace, &newDataVolume.Spec)
if err != nil {
return nil, err
}
if dvSource != nil {
// If SourceRef is set, populate spec.Source with data from the DataSource
// If not, update the field anyway to account for possible namespace changes
if newDataVolume.Spec.SourceRef != nil {
newDataVolume.Spec.SourceRef = nil
}
newDataVolume.Spec.Source = dvSource
}
return newDataVolume, nil
}
func GetDataVolumeFromCache(namespace, name string, dataVolumeStore cache.Store) (*cdiv1.DataVolume, error) {
key := controller.NamespacedKey(namespace, name)
obj, exists, err := dataVolumeStore.GetByKey(key)
if err != nil {
return nil, fmt.Errorf("error fetching DataVolume %s: %v", key, err)
}
if !exists {
return nil, nil
}
dv, ok := obj.(*cdiv1.DataVolume)
if !ok {
return nil, fmt.Errorf("error converting object to DataVolume: object is of type %T", obj)
}
return dv.DeepCopy(), nil
}
func HasDataVolumeErrors(namespace string, volumes []virtv1.Volume, dataVolumeStore cache.Store) error {
for _, volume := range volumes {
if volume.DataVolume == nil {
continue
}
dv, err := GetDataVolumeFromCache(namespace, volume.DataVolume.Name, dataVolumeStore)
if err != nil {
log.Log.Errorf("Error fetching DataVolume %s: %v", volume.DataVolume.Name, err)
continue
}
if dv == nil {
continue
}
if DataVolumeFailed(dv) {
return fmt.Errorf("DataVolume %s is in Failed phase", volume.DataVolume.Name)
}
dvRunningCond := NewDataVolumeConditionManager().GetCondition(dv, cdiv1.DataVolumeRunning)
if dvRunningCond != nil &&
dvRunningCond.Status == v1.ConditionFalse &&
(dvRunningCond.Reason == "Error" || dvRunningCond.Reason == "ImagePullFailed") {
return fmt.Errorf("DataVolume %s importer has stopped running due to an error: %v",
volume.DataVolume.Name, dvRunningCond.Message)
}
}
return nil
}
// FIXME: Bound mistakenly reports ErrExceededQuota with ConditionUnknown status
func HasDataVolumeExceededQuotaError(dv *cdiv1.DataVolume) error {
dvBoundCond := NewDataVolumeConditionManager().GetCondition(dv, cdiv1.DataVolumeBound)
if dvBoundCond != nil && dvBoundCond.Status != v1.ConditionTrue && dvBoundCond.Reason == "ErrExceededQuota" {
return fmt.Errorf("DataVolume %s importer is not running due to an error: %v", dv.Name, dvBoundCond.Message)
}
return nil
}
func HasDataVolumeProvisioning(namespace string, volumes []virtv1.Volume, dataVolumeStore cache.Store) bool {
for _, volume := range volumes {
if volume.DataVolume == nil {
continue
}
dv, err := GetDataVolumeFromCache(namespace, volume.DataVolume.Name, dataVolumeStore)
if err != nil {
log.Log.Errorf("Error fetching DataVolume %s while determining virtual machine status: %v", volume.DataVolume.Name, err)
continue
}
if dv == nil || dv.Status.Phase == cdiv1.Succeeded || dv.Status.Phase == cdiv1.PendingPopulation {
continue
}
dvConditions := NewDataVolumeConditionManager()
isBound := dvConditions.HasConditionWithStatus(dv, cdiv1.DataVolumeBound, v1.ConditionTrue)
// WFFC plus unbound is not provisioning
if isBound || dv.Status.Phase != cdiv1.WaitForFirstConsumer {
return true
}
}
return false
}
func ListDataVolumesFromTemplates(namespace string, dvTemplates []virtv1.DataVolumeTemplateSpec, dataVolumeStore cache.Store) ([]*cdiv1.DataVolume, error) {
dataVolumes := []*cdiv1.DataVolume{}
for _, template := range dvTemplates {
// get DataVolume from cache for each templated dataVolume
dv, err := GetDataVolumeFromCache(namespace, template.Name, dataVolumeStore)
if err != nil {
return dataVolumes, err
} else if dv == nil {
continue
}
dataVolumes = append(dataVolumes, dv)
}
return dataVolumes, nil
}
func ListDataVolumesFromVolumes(namespace string, volumes []virtv1.Volume, dataVolumeStore cache.Store, pvcStore cache.Store) ([]*cdiv1.DataVolume, error) {
dataVolumes := []*cdiv1.DataVolume{}
for _, volume := range volumes {
dataVolumeName := getDataVolumeName(namespace, volume, pvcStore)
if dataVolumeName == nil {
continue
}
dv, err := GetDataVolumeFromCache(namespace, *dataVolumeName, dataVolumeStore)
if err != nil {
return dataVolumes, err
} else if dv == nil {
continue
}
dataVolumes = append(dataVolumes, dv)
}
return dataVolumes, nil
}
func getDataVolumeName(namespace string, volume virtv1.Volume, pvcStore cache.Store) *string {
if volume.VolumeSource.PersistentVolumeClaim != nil {
pvcInterface, pvcExists, _ := pvcStore.
GetByKey(fmt.Sprintf("%s/%s", namespace, volume.VolumeSource.PersistentVolumeClaim.ClaimName))
if pvcExists {
pvc := pvcInterface.(*v1.PersistentVolumeClaim)
pvcOwner := metav1.GetControllerOf(pvc)
if pvcOwner != nil && pvcOwner.Kind == "DataVolume" {
return &pvcOwner.Name
}
}
} else if volume.VolumeSource.DataVolume != nil {
return &volume.VolumeSource.DataVolume.Name
}
return nil
}
func DataVolumeByNameFunc(dataVolumeStore cache.Store, dataVolumes []*cdiv1.DataVolume) func(name string, namespace string) (*cdiv1.DataVolume, error) {
return func(name, namespace string) (*cdiv1.DataVolume, error) {
for _, dataVolume := range dataVolumes {
if dataVolume.Name == name && dataVolume.Namespace == namespace {
return dataVolume, nil
}
}
dv, exists, _ := dataVolumeStore.GetByKey(fmt.Sprintf("%s/%s", namespace, name))
if !exists {
return nil, fmt.Errorf("unable to find datavolume %s/%s", namespace, name)
}
return dv.(*cdiv1.DataVolume), nil
}
}
type DataVolumeConditionManager struct {
}
func NewDataVolumeConditionManager() *DataVolumeConditionManager {
return &DataVolumeConditionManager{}
}
func (d *DataVolumeConditionManager) GetCondition(dv *cdiv1.DataVolume, cond cdiv1.DataVolumeConditionType) *cdiv1.DataVolumeCondition {
if dv == nil {
return nil
}
for _, c := range dv.Status.Conditions {
if c.Type == cond {
return &c
}
}
return nil
}
func (d *DataVolumeConditionManager) HasCondition(dv *cdiv1.DataVolume, cond cdiv1.DataVolumeConditionType) bool {
return d.GetCondition(dv, cond) != nil
}
func (d *DataVolumeConditionManager) HasConditionWithStatus(dv *cdiv1.DataVolume, cond cdiv1.DataVolumeConditionType, status v1.ConditionStatus) bool {
c := d.GetCondition(dv, cond)
return c != nil && c.Status == status
}
func (d *DataVolumeConditionManager) HasConditionWithStatusAndReason(dv *cdiv1.DataVolume, cond cdiv1.DataVolumeConditionType, status v1.ConditionStatus, reason string) bool {
c := d.GetCondition(dv, cond)
return c != nil && c.Status == status && c.Reason == reason
}
var ErrDVNotFound = errors.New("datavolume not found")
type DVNotFoundError struct {
DVName string
Err error
}
func NewDVNotFoundError(name string) error {
return &DVNotFoundError{DVName: name, Err: ErrDVNotFound}
}
func (e *DVNotFoundError) Error() string {
if e.DVName == "" {
return "datavolume not defined"
}
return fmt.Sprintf("the datavolume %s doesn't exist", e.DVName)
}
func (e *DVNotFoundError) Unwrap() error {
return e.Err
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package types
import (
"errors"
"fmt"
"strings"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
"kubevirt.io/kubevirt/pkg/controller"
)
const (
MiB = 1024 * 1024
allowClaimAdoptionAnnotation = "cdi.kubevirt.io/allowClaimAdoption"
)
type PvcNotFoundError struct {
Reason string
}
func (e PvcNotFoundError) Error() string {
return e.Reason
}
func IsPVCBlockFromStore(store cache.Store, namespace string, claimName string) (pvc *k8sv1.PersistentVolumeClaim, exists bool, isBlockDevice bool, err error) {
obj, exists, err := store.GetByKey(namespace + "/" + claimName)
if err != nil || !exists {
return nil, exists, false, err
}
if pvc, ok := obj.(*k8sv1.PersistentVolumeClaim); ok {
return pvc, true, IsPVCBlock(pvc.Spec.VolumeMode), nil
}
return nil, false, false, fmt.Errorf("this is not a PVC! %v", obj)
}
func IsPVCBlock(volumeMode *k8sv1.PersistentVolumeMode) bool {
// We do not need to consider the data in a PersistentVolume (as of Kubernetes 1.9).
// If a PVC does not specify VolumeMode and the PV specifies VolumeMode = Block,
// the claim will not be bound. So for the sake of a boolean answer, if the PVC's
// VolumeMode is Block, that unambiguously answers the question.
return volumeMode != nil && *volumeMode == k8sv1.PersistentVolumeBlock
}
func HasSharedAccessMode(accessModes []k8sv1.PersistentVolumeAccessMode) bool {
for _, accessMode := range accessModes {
if accessMode == k8sv1.ReadWriteMany {
return true
}
}
return false
}
func IsReadOnlyAccessMode(accessModes []k8sv1.PersistentVolumeAccessMode) bool {
for _, accessMode := range accessModes {
if accessMode == k8sv1.ReadOnlyMany {
return true
}
}
return false
}
func IsReadWriteOnceAccessMode(accessModes []k8sv1.PersistentVolumeAccessMode) bool {
for _, accessMode := range accessModes {
if accessMode == k8sv1.ReadOnlyMany || accessMode == k8sv1.ReadWriteMany {
return false
}
}
return true
}
func IsPreallocated(annotations map[string]string) bool {
for a, value := range annotations {
if strings.Contains(a, "/storage.preallocation") && value == "true" {
return true
}
if strings.Contains(a, "/storage.thick-provisioned") && value == "true" {
return true
}
}
return false
}
func PVCNameFromVirtVolume(volume *virtv1.Volume) string {
if volume.DataVolume != nil {
// TODO: look up the correct PVC name based on the DataVolume; right now they match, but that will not always be true.
return volume.DataVolume.Name
} else if volume.PersistentVolumeClaim != nil {
return volume.PersistentVolumeClaim.ClaimName
} else if volume.MemoryDump != nil {
return volume.MemoryDump.ClaimName
}
return ""
}
func GetPVCsFromVolumes(volumes []virtv1.Volume) map[string]string {
pvcs := map[string]string{}
for _, volume := range volumes {
pvcName := PVCNameFromVirtVolume(&volume)
if pvcName == "" {
continue
}
pvcs[volume.Name] = pvcName
}
return pvcs
}
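// Illustrative example (editorial addition): for a VM with a DataVolume
// volume "rootdisk" (DV name "vm-rootdisk") and a PVC volume "data" (claim
// "my-claim"), the result maps volume names to claim names:
//
//	{"rootdisk": "vm-rootdisk", "data": "my-claim"}
//
// The DataVolume case relies on the PVC name matching the DV name (see the
// TODO in PVCNameFromVirtVolume above).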
func VirtVolumesToPVCMap(volumes []*virtv1.Volume, pvcStore cache.Store, namespace string) (map[string]*k8sv1.PersistentVolumeClaim, error) {
volumeNamesPVCMap := make(map[string]*k8sv1.PersistentVolumeClaim)
for _, volume := range volumes {
claimName := PVCNameFromVirtVolume(volume)
if claimName == "" {
return nil, fmt.Errorf("volume %s is not a PVC or Datavolume", volume.Name)
}
pvc, exists, _, err := IsPVCBlockFromStore(pvcStore, namespace, claimName)
if err != nil {
return nil, fmt.Errorf("failed to get PVC: %v", err)
}
if !exists {
return nil, fmt.Errorf("claim %s not found", claimName)
}
volumeNamesPVCMap[volume.Name] = pvc
}
return volumeNamesPVCMap, nil
}
var ErrPVCNotFound = errors.New("PVC not found")
type PVCNotFoundError struct {
PVCName string
Err error
}
func NewPVCNotFoundError(pvcName string) error {
return &PVCNotFoundError{PVCName: pvcName, Err: ErrPVCNotFound}
}
func (e *PVCNotFoundError) Error() string {
if e.PVCName == "" {
return "persistent volume claim not defined"
}
return fmt.Sprintf("the pvc %s doesn't exist", e.PVCName)
}
func (e *PVCNotFoundError) Unwrap() error {
return e.Err
}
func GetPersistentVolumeClaimFromCache(namespace, name string, pvcStore cache.Store) (*k8sv1.PersistentVolumeClaim, error) {
key := controller.NamespacedKey(namespace, name)
obj, exists, err := pvcStore.GetByKey(key)
if err != nil {
return nil, fmt.Errorf("error fetching PersistentVolumeClaim %s: %v", key, err)
}
if !exists {
return nil, nil
}
pvc, ok := obj.(*k8sv1.PersistentVolumeClaim)
if !ok {
return nil, fmt.Errorf("error converting object to PersistentVolumeClaim: object is of type %T", obj)
}
return pvc.DeepCopy(), nil
}
func HasUnboundPVC(namespace string, volumes []virtv1.Volume, pvcStore cache.Store) bool {
for _, volume := range volumes {
claimName := PVCNameFromVirtVolume(&volume)
if claimName == "" {
continue
}
pvc, err := GetPersistentVolumeClaimFromCache(namespace, claimName, pvcStore)
if err != nil {
log.Log.Errorf("Error fetching PersistentVolumeClaim %s while determining virtual machine status: %v", claimName, err)
continue
}
if pvc == nil {
continue
}
if pvc.Status.Phase != k8sv1.ClaimBound {
return true
}
}
return false
}
func VolumeReadyToAttachToNode(namespace string, volume virtv1.Volume, dataVolumes []*cdiv1.DataVolume, dataVolumeStore, pvcStore cache.Store) (bool, bool, error) {
name := PVCNameFromVirtVolume(&volume)
dataVolumeFunc := DataVolumeByNameFunc(dataVolumeStore, dataVolumes)
wffc := false
ready := false
// err is always nil
pvcInterface, pvcExists, _ := pvcStore.GetByKey(fmt.Sprintf("%s/%s", namespace, name))
if pvcExists {
var err error
pvc := pvcInterface.(*k8sv1.PersistentVolumeClaim)
ready, err = cdiv1.IsSucceededOrPendingPopulation(pvc, dataVolumeFunc)
if err != nil {
return false, false, err
}
if !ready {
waitsForFirstConsumer, err := cdiv1.IsWaitForFirstConsumerBeforePopulating(pvc, dataVolumeFunc)
if err != nil {
return false, false, err
}
if waitsForFirstConsumer {
wffc = true
}
}
} else {
return false, false, PvcNotFoundError{Reason: fmt.Sprintf("didn't find PVC %v", name)}
}
return ready, wffc, nil
}
func RenderPVC(size *resource.Quantity, claimName, namespace, storageClass, accessMode string, blockVolume bool) *k8sv1.PersistentVolumeClaim {
pvc := &k8sv1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: claimName,
Namespace: namespace,
},
Spec: k8sv1.PersistentVolumeClaimSpec{
Resources: k8sv1.VolumeResourceRequirements{
Requests: k8sv1.ResourceList{
k8sv1.ResourceStorage: *size,
},
},
},
}
if storageClass != "" {
pvc.Spec.StorageClassName = &storageClass
}
if accessMode != "" {
pvc.Spec.AccessModes = []k8sv1.PersistentVolumeAccessMode{k8sv1.PersistentVolumeAccessMode(accessMode)}
} else {
pvc.Spec.AccessModes = []k8sv1.PersistentVolumeAccessMode{k8sv1.ReadWriteOnce}
}
if blockVolume {
volMode := k8sv1.PersistentVolumeBlock
pvc.Spec.VolumeMode = &volMode
}
return pvc
}
func GetDisksByName(vmiSpec *virtv1.VirtualMachineInstanceSpec) map[string]*virtv1.Disk {
disks := map[string]*virtv1.Disk{}
for _, disk := range vmiSpec.Domain.Devices.Disks {
disks[disk.Name] = disk.DeepCopy()
}
return disks
}
// GetDiskCapacity returns the expected disk capacity: the minimum of the storage request and the PVC capacity.
// It returns nil when there is insufficient data to calculate this minimum.
func GetDiskCapacity(pvcInfo *virtv1.PersistentVolumeClaimInfo) *int64 {
logger := log.DefaultLogger()
storageCapacityResource, ok := pvcInfo.Capacity[k8sv1.ResourceStorage]
if !ok {
return nil
}
storageCapacity, ok := storageCapacityResource.AsInt64()
if !ok {
logger.Infof("Failed to convert storage capacity %+v to int64", storageCapacityResource)
return nil
}
storageRequestResource, ok := pvcInfo.Requests[k8sv1.ResourceStorage]
if !ok {
return nil
}
storageRequest, ok := storageRequestResource.AsInt64()
if !ok {
logger.Infof("Failed to convert storage request %+v to int64", storageRequestResource)
return nil
}
preferredSize := min(storageRequest, storageCapacity)
return &preferredSize
}
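// Worked example (editorial addition): a PVC requested at 10Gi but provisioned
// at 12Gi yields min(10Gi, 12Gi) = 10Gi, so the disk is sized to what the
// user asked for rather than what the storage provider rounded up to.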
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package types
import (
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
v1 "kubevirt.io/api/core/v1"
)
func IsStorageVolume(volume *v1.Volume) bool {
return volume.PersistentVolumeClaim != nil || volume.DataVolume != nil
}
func IsDeclarativeHotplugVolume(vol *v1.Volume) bool {
if vol == nil {
return false
}
volSrc := vol.VolumeSource
if volSrc.PersistentVolumeClaim != nil && volSrc.PersistentVolumeClaim.Hotpluggable {
return true
}
if volSrc.DataVolume != nil && volSrc.DataVolume.Hotpluggable {
return true
}
return false
}
func IsHotpluggableVolumeSource(vol *v1.Volume) bool {
return IsStorageVolume(vol) ||
vol.MemoryDump != nil
}
func IsHotplugVolume(vol *v1.Volume) bool {
if vol == nil {
return false
}
if IsDeclarativeHotplugVolume(vol) {
return true
}
volSrc := vol.VolumeSource
if volSrc.MemoryDump != nil && volSrc.MemoryDump.PersistentVolumeClaimVolumeSource.Hotpluggable {
return true
}
return false
}
func IsUtilityVolume(vmi *v1.VirtualMachineInstance, volumeName string) bool {
for _, utilityVolume := range vmi.Spec.UtilityVolumes {
if utilityVolume.Name == volumeName {
return true
}
}
return false
}
func GetHotplugVolumes(vmi *v1.VirtualMachineInstance, virtlauncherPod *k8sv1.Pod) []*v1.Volume {
hotplugVolumes := make([]*v1.Volume, 0)
podVolumes := virtlauncherPod.Spec.Volumes
vmiVolumes := vmi.Spec.Volumes
podVolumeMap := make(map[string]k8sv1.Volume)
for _, podVolume := range podVolumes {
podVolumeMap[podVolume.Name] = podVolume
}
for _, vmiVolume := range vmiVolumes {
if _, ok := podVolumeMap[vmiVolume.Name]; !ok && IsHotpluggableVolumeSource(&vmiVolume) {
hotplugVolumes = append(hotplugVolumes, vmiVolume.DeepCopy())
}
}
// Also include utility volumes, converting them to regular volumes
for _, utilityVolume := range vmi.Spec.UtilityVolumes {
if _, ok := podVolumeMap[utilityVolume.Name]; !ok {
volume := &v1.Volume{
Name: utilityVolume.Name,
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
PersistentVolumeClaimVolumeSource: utilityVolume.PersistentVolumeClaimVolumeSource,
Hotpluggable: true,
},
},
}
hotplugVolumes = append(hotplugVolumes, volume)
}
}
return hotplugVolumes
}
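// Editorial note: a volume counts as hotplugged here when the VMI spec lists
// it but the current virt-launcher pod does not mount it, i.e. it was added
// after the pod was created. Utility volumes are always surfaced as
// hotpluggable PVC volumes so the attachment logic can treat them uniformly.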
func GetVolumesByName(vmiSpec *v1.VirtualMachineInstanceSpec) map[string]*v1.Volume {
volumes := map[string]*v1.Volume{}
for _, vol := range vmiSpec.Volumes {
volumes[vol.Name] = vol.DeepCopy()
}
return volumes
}
func GetFilesystemsFromVolumes(vmi *v1.VirtualMachineInstance) map[string]*v1.Filesystem {
fs := map[string]*v1.Filesystem{}
for _, f := range vmi.Spec.Domain.Devices.Filesystems {
fs[f.Name] = f.DeepCopy()
}
return fs
}
func IsMigratedVolume(name string, vmi *v1.VirtualMachineInstance) bool {
for _, v := range vmi.Status.MigratedVolumes {
if v.VolumeName == name {
return true
}
}
return false
}
func GetTotalSizeMigratedVolumes(vmi *v1.VirtualMachineInstance) *resource.Quantity {
size := int64(0)
srcVols := make(map[string]bool)
for _, v := range vmi.Status.MigratedVolumes {
if v.SourcePVCInfo == nil {
continue
}
srcVols[v.SourcePVCInfo.ClaimName] = true
}
for _, vstatus := range vmi.Status.VolumeStatus {
if vstatus.PersistentVolumeClaimInfo == nil {
continue
}
if _, ok := srcVols[vstatus.PersistentVolumeClaimInfo.ClaimName]; ok {
if s := GetDiskCapacity(vstatus.PersistentVolumeClaimInfo); s != nil {
size += *s
}
}
}
return resource.NewScaledQuantity(size, resource.Giga)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package utils
import (
"context"
"errors"
"fmt"
"sort"
"github.com/openshift/library-go/pkg/build/naming"
k8sv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
validation "k8s.io/apimachinery/pkg/util/validation"
v1 "kubevirt.io/api/core/v1"
snapshotv1 "kubevirt.io/api/snapshot/v1beta1"
"kubevirt.io/client-go/kubecli"
backendstorage "kubevirt.io/kubevirt/pkg/storage/backend-storage"
)
type VolumeOption int
const (
// Default option, just includes regular volumes found in the VM/VMI spec
WithRegularVolumes VolumeOption = iota
// Includes backend storage PVC
WithBackendVolume
// Includes all volumes
WithAllVolumes
)
var ErrNoBackendPVC = fmt.Errorf("no backend PVC when there should be one")
// GetVolumes returns all volumes of the passed object; it returns an empty slice and an error for unsupported object types
func GetVolumes(obj interface{}, client kubecli.KubevirtClient, opts ...VolumeOption) ([]v1.Volume, error) {
switch obj := obj.(type) {
case *v1.VirtualMachine:
return getVirtualMachineVolumes(obj, client, opts...)
case *snapshotv1.VirtualMachine:
return getSnapshotVirtualMachineVolumes(obj, client, opts...)
case *v1.VirtualMachineInstance:
return getVirtualMachineInstanceVolumes(obj, opts...)
default:
return []v1.Volume{}, fmt.Errorf("unsupported object type: %T", obj)
}
}
// getVirtualMachineVolumes returns all volumes of a VM except the special ones based on volume options
func getVirtualMachineVolumes(vm *v1.VirtualMachine, client kubecli.KubevirtClient, opts ...VolumeOption) ([]v1.Volume, error) {
return getVolumes(vm, vm.Spec.Template.Spec.Volumes, client, opts...)
}
// getSnapshotVirtualMachineVolumes returns all volumes of a Snapshot VM except the special ones based on volume options
func getSnapshotVirtualMachineVolumes(vm *snapshotv1.VirtualMachine, client kubecli.KubevirtClient, opts ...VolumeOption) ([]v1.Volume, error) {
return getVolumes(vm, vm.Spec.Template.Spec.Volumes, client, opts...)
}
// getVirtualMachineInstanceVolumes returns all volumes of a VMI except the special ones based on volume options
func getVirtualMachineInstanceVolumes(vmi *v1.VirtualMachineInstance, opts ...VolumeOption) ([]v1.Volume, error) {
return getVolumes(vmi, vmi.Spec.Volumes, nil, opts...)
}
func getVolumes(obj metav1.Object, volumes []v1.Volume, client kubecli.KubevirtClient, opts ...VolumeOption) ([]v1.Volume, error) {
var enumeratedVolumes []v1.Volume
if needsRegularVolumes(opts) {
for _, volume := range volumes {
enumeratedVolumes = append(enumeratedVolumes, volume)
}
}
if needsBackendPVC(obj, opts) {
backendVolumeName, err := getBackendPVCName(obj, client)
if err != nil {
return enumeratedVolumes, err
}
if backendVolumeName != "" {
enumeratedVolumes = append(enumeratedVolumes, *createBackendPVCVolume(backendVolumeName, obj.GetName()))
}
}
return enumeratedVolumes, nil
}
func getBackendPVCName(obj metav1.Object, client kubecli.KubevirtClient) (string, error) {
switch obj := obj.(type) {
case *v1.VirtualMachineInstance:
return backendstorage.CurrentPVCName(obj), nil
default:
// TODO: This could be much simpler if the backend PVC name were accessible from the VM spec/status.
// Refactor this once the backend PVC is more accessible.
if client == nil {
return "", fmt.Errorf("no client provided")
}
pvcs, err := client.CoreV1().PersistentVolumeClaims(obj.GetNamespace()).List(context.Background(), metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", backendstorage.PVCPrefix, obj.GetName()),
})
if err != nil {
return "", err
}
switch len(pvcs.Items) {
case 1:
return pvcs.Items[0].Name, nil
case 0:
return "", ErrNoBackendPVC
default:
pvc, err := getNewestNonTerminatingPVC(pvcs.Items)
if err != nil {
return "", fmt.Errorf("no non-terminating PVC found")
}
return pvc.Name, nil
}
}
}
func needsBackendPVC(obj metav1.Object, opts []VolumeOption) bool {
for _, opt := range opts {
if opt == WithBackendVolume || opt == WithAllVolumes {
return backendstorage.IsBackendStorageNeeded(obj)
}
}
return false
}
func needsRegularVolumes(opts []VolumeOption) bool {
if len(opts) == 0 {
return true
}
for _, opt := range opts {
if opt == WithRegularVolumes || opt == WithAllVolumes {
return true
}
}
return false
}
func createBackendPVCVolume(pvcName, vmName string) *v1.Volume {
return &v1.Volume{
Name: BackendPVCVolumeName(vmName),
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
PersistentVolumeClaimVolumeSource: k8sv1.PersistentVolumeClaimVolumeSource{
ClaimName: pvcName,
ReadOnly: false,
},
},
},
}
}
// BackendPVCVolumeName returns the name of the volume that will be arbitrarily used to represent
// the backend PVC during volume enumeration.
func BackendPVCVolumeName(vmName string) string {
return naming.GetName(backendstorage.PVCPrefix, vmName, validation.DNS1035LabelMaxLength)
}
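// Illustrative example (editorial addition, assuming backendstorage.PVCPrefix
// is "persistent-state-for"): BackendPVCVolumeName("my-vm") yields
// "persistent-state-for-my-vm"; for very long VM names, naming.GetName
// truncates and appends a short hash so the result stays within the
// 63-character DNS label limit while remaining unique.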
// Helper function to select the newest non-terminating PVC
func getNewestNonTerminatingPVC(pvcs []k8sv1.PersistentVolumeClaim) (*k8sv1.PersistentVolumeClaim, error) {
nonTerminatingPVCs := []k8sv1.PersistentVolumeClaim{}
for _, pvc := range pvcs {
if pvc.ObjectMeta.DeletionTimestamp == nil {
nonTerminatingPVCs = append(nonTerminatingPVCs, pvc)
}
}
if len(nonTerminatingPVCs) == 0 {
return nil, fmt.Errorf("no non-terminating PVCs found")
}
sort.Slice(nonTerminatingPVCs, func(i, j int) bool {
return nonTerminatingPVCs[i].CreationTimestamp.After(nonTerminatingPVCs[j].CreationTimestamp.Time)
})
return &nonTerminatingPVCs[0], nil
}
func IsErrNoBackendPVC(err error) bool {
return errors.Is(err, ErrNoBackendPVC)
}
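// Illustrative sketch (editorial addition): callers distinguish "backend PVC
// not created yet" from hard failures via this helper:
//
//	vols, err := GetVolumes(vm, client, WithBackendVolume)
//	if IsErrNoBackendPVC(err) {
//		// expected transiently while backend storage is provisioned; retry later
//	} else if err != nil {
//		return err // real failure
//	}
//	_ = vols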
package testutils
import (
"strings"
. "github.com/onsi/gomega"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
)
const virtioTrans = "virtio-transitional"
func ExpectVirtioTransitionalOnly(dom *api.DomainSpec) {
hit := false
for _, disk := range dom.Devices.Disks {
if disk.Target.Bus == v1.DiskBusVirtio {
ExpectWithOffset(1, disk.Model).To(Equal(virtioTrans))
hit = true
}
}
ExpectWithOffset(1, hit).To(BeTrue())
hit = false
for _, ifc := range dom.Devices.Interfaces {
if strings.HasPrefix(ifc.Model.Type, v1.VirtIO) {
ExpectWithOffset(1, ifc.Model.Type).To(Equal(virtioTrans))
hit = true
}
}
ExpectWithOffset(1, hit).To(BeTrue())
hit = false
for _, input := range dom.Devices.Inputs {
if strings.HasPrefix(input.Model, v1.VirtIO) {
// All our input types exist only as virtio 1.0 and accept only virtio
ExpectWithOffset(1, input.Model).To(Equal(v1.VirtIO))
hit = true
}
}
ExpectWithOffset(1, hit).To(BeTrue())
hitCount := 0
for _, controller := range dom.Devices.Controllers {
if controller.Type == "virtio-serial" {
ExpectWithOffset(1, controller.Model).To(Equal(virtioTrans))
hitCount++
}
if controller.Type == "scsi" {
ExpectWithOffset(1, controller.Model).To(Equal(virtioTrans))
hitCount++
}
}
ExpectWithOffset(1, hitCount).To(BeNumerically("==", 2))
ExpectWithOffset(1, dom.Devices.Rng.Model).To(Equal(virtioTrans))
ExpectWithOffset(1, dom.Devices.Ballooning.Model).To(Equal(virtioTrans))
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package testutils
import (
"fmt"
"strings"
"github.com/onsi/gomega"
"github.com/onsi/gomega/types"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
)
func HaveStatusCode(expected interface{}) types.GomegaMatcher {
return &haveStatusCodeMatcher{
expected: expected,
}
}
type haveStatusCodeMatcher struct {
expected interface{}
statusCode int
}
func (matcher *haveStatusCodeMatcher) Match(actual interface{}) (success bool, err error) {
result, ok := actual.(rest.Result)
if !ok {
return false, fmt.Errorf("HaveStatusCode matcher expects a kubernetes rest client Result")
}
expectedStatusCode, ok := matcher.expected.(int)
if !ok {
return false, fmt.Errorf("Expected status code to be of type int")
}
result.StatusCode(&matcher.statusCode)
if result.Error() != nil {
matcher.statusCode = int(result.Error().(*errors.StatusError).Status().Code)
}
return matcher.statusCode == expectedStatusCode, nil
}
func (matcher *haveStatusCodeMatcher) FailureMessage(actual interface{}) (message string) {
return fmt.Sprintf("Expected status code \n\t%#v\not to be\n\t%#v", matcher.statusCode, matcher.expected)
}
func (matcher *haveStatusCodeMatcher) NegatedFailureMessage(actual interface{}) (message string) {
return fmt.Sprintf("Expected status code \n\t%#v\nnot to be\n\t%#v", matcher.statusCode, matcher.expected)
}
// In case we don't care about emitted events, we simply consume all of them and return.
func IgnoreEvents(recorder *record.FakeRecorder) {
loop:
for {
select {
case <-recorder.Events:
default:
break loop
}
}
}
func ExpectEvent(recorder *record.FakeRecorder, reason string) {
gomega.ExpectWithOffset(1, recorder.Events).To(gomega.Receive(gomega.ContainSubstring(reason)))
}
// ExpectEvents checks for given reasons in arbitrary order
func ExpectEvents(recorder *record.FakeRecorder, reasons ...string) {
l := len(reasons)
for x := 0; x < l; x++ {
select {
case e := <-recorder.Events:
filtered := []string{}
found := false
for _, reason := range reasons {
if strings.Contains(e, reason) && !found {
found = true
continue
}
filtered = append(filtered, reason)
}
gomega.ExpectWithOffset(1, found).To(gomega.BeTrue(), "Expected to match event reason '%s' with one of %v", e, reasons)
reasons = filtered
default:
// There should be something, trigger an error
gomega.ExpectWithOffset(1, recorder.Events).To(gomega.Receive())
}
}
}
func SatisfyAnyRegexp(regexps []string) types.GomegaMatcher {
matchers := []types.GomegaMatcher{}
for _, regexp := range regexps {
matchers = append(matchers, gomega.MatchRegexp(regexp))
}
return gomega.SatisfyAny(matchers...)
}
package testutils
import (
"runtime"
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/tools/cache"
k8score "k8s.io/api/core/v1"
KVv1 "kubevirt.io/api/core/v1"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
const (
kvObjectNamespace = "kubevirt"
kvObjectName = "kubevirt"
)
func NewFakeClusterConfigUsingKV(kv *KVv1.KubeVirt) (*virtconfig.ClusterConfig, cache.SharedIndexInformer, cache.Store) {
return NewFakeClusterConfigUsingKVWithCPUArch(kv, runtime.GOARCH)
}
func NewFakeClusterConfigUsingKVWithCPUArch(kv *KVv1.KubeVirt, CPUArch string) (*virtconfig.ClusterConfig, cache.SharedIndexInformer, cache.Store) {
kv.ResourceVersion = rand.String(10)
kv.Status.Phase = "Deployed"
crdInformer, _ := NewFakeInformerFor(&extv1.CustomResourceDefinition{})
kubeVirtInformer, _ := NewFakeInformerFor(&KVv1.KubeVirt{})
kubeVirtInformer.GetStore().Add(kv)
AddDataVolumeAPI(crdInformer)
cfg, _ := virtconfig.NewClusterConfigWithCPUArch(crdInformer, kubeVirtInformer, kvObjectNamespace, CPUArch)
return cfg, crdInformer, kubeVirtInformer.GetStore()
}
func NewFakeClusterConfigUsingKVConfig(config *KVv1.KubeVirtConfiguration) (*virtconfig.ClusterConfig, cache.SharedIndexInformer, cache.Store) {
kv := &KVv1.KubeVirt{
ObjectMeta: metav1.ObjectMeta{
Name: kvObjectName,
Namespace: kvObjectNamespace,
},
Spec: KVv1.KubeVirtSpec{
Configuration: *config,
},
Status: KVv1.KubeVirtStatus{
DefaultArchitecture: runtime.GOARCH,
Phase: "Deployed",
},
}
return NewFakeClusterConfigUsingKV(kv)
}
func NewFakeContainerDiskSource() *KVv1.ContainerDiskSource {
return &KVv1.ContainerDiskSource{
Image: "fake-image",
ImagePullSecret: "fake-pull-secret",
Path: "/fake-path",
}
}
func NewFakePersistentVolumeSource() *KVv1.PersistentVolumeClaimVolumeSource {
return &KVv1.PersistentVolumeClaimVolumeSource{
PersistentVolumeClaimVolumeSource: k8score.PersistentVolumeClaimVolumeSource{
ClaimName: "fake-pvc",
},
}
}
func NewFakeMemoryDumpSource(name string) *KVv1.MemoryDumpVolumeSource {
return &KVv1.MemoryDumpVolumeSource{
PersistentVolumeClaimVolumeSource: KVv1.PersistentVolumeClaimVolumeSource{
PersistentVolumeClaimVolumeSource: k8score.PersistentVolumeClaimVolumeSource{
ClaimName: name,
},
Hotpluggable: true,
},
}
}
func RemoveDataVolumeAPI(crdInformer cache.SharedIndexInformer) {
crdInformer.GetStore().Replace(nil, "")
}
func AddDataVolumeAPI(crdInformer cache.SharedIndexInformer) {
crdInformer.GetStore().Add(&extv1.CustomResourceDefinition{
Spec: extv1.CustomResourceDefinitionSpec{
Names: extv1.CustomResourceDefinitionNames{
Kind: "DataVolume",
},
},
})
}
func GetFakeKubeVirtClusterConfig(kubeVirtStore cache.Store) *KVv1.KubeVirt {
obj, _, _ := kubeVirtStore.GetByKey(kvObjectNamespace + "/" + kvObjectName)
return obj.(*KVv1.KubeVirt)
}
func UpdateFakeKubeVirtClusterConfig(kubeVirtStore cache.Store, kv *KVv1.KubeVirt) {
clone := kv.DeepCopy()
clone.ResourceVersion = rand.String(10)
clone.Name = kvObjectName
clone.Namespace = kvObjectNamespace
clone.Status.Phase = "Deployed"
kubeVirtStore.Update(clone)
}
func AddServiceMonitorAPI(crdInformer cache.SharedIndexInformer) {
crdInformer.GetStore().Add(&extv1.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{
Name: "service-monitors.monitoring.coreos.com",
},
Spec: extv1.CustomResourceDefinitionSpec{
Names: extv1.CustomResourceDefinitionNames{
Kind: "ServiceMonitor",
},
},
})
}
func RemoveServiceMonitorAPI(crdInformer cache.SharedIndexInformer) {
crdInformer.GetStore().Replace(nil, "")
}
func AddPrometheusRuleAPI(crdInformer cache.SharedIndexInformer) {
crdInformer.GetStore().Add(&extv1.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{
Name: "prometheusrules.monitoring.coreos.com",
},
Spec: extv1.CustomResourceDefinitionSpec{
Names: extv1.CustomResourceDefinitionNames{
Kind: "PrometheusRule",
},
},
})
}
func RemovePrometheusRuleAPI(crdInformer cache.SharedIndexInformer) {
crdInformer.GetStore().Replace(nil, "")
}
package testutils
import (
"sync"
"sync/atomic"
"time"
"sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue"
)
/*
MockPriorityQueue is a helper workqueue which can be wrapped around
any RateLimitingInterface implementing queue. This allows synchronous
testing of the controller. The typical pattern is:
MockQueue.ExpectAdds(3)
vmiSource.Add(vmi)
vmiSource.Add(vmi1)
vmiSource.Add(vmi2)
MockQueue.Wait()
This ensures that Source callbacks which are listening on vmiSource
enqueued an object three times. Since enqueuing is typically the last
action in listener callbacks, we can assume that the wanted scenario for
a controller is set up, and an execution will process this scenario.
*/
type MockPriorityQueue[T comparable] struct {
priorityqueue.PriorityQueue[T]
addWG *sync.WaitGroup
rateLimitedEnque int32
addAfterEnque int32
wgLock sync.Mutex
}
func (q *MockPriorityQueue[T]) Add(item T) {
q.PriorityQueue.Add(item)
q.wgLock.Lock()
defer q.wgLock.Unlock()
if q.addWG != nil {
q.addWG.Done()
}
}
func (q *MockPriorityQueue[T]) AddRateLimited(item T) {
q.PriorityQueue.AddRateLimited(item)
atomic.AddInt32(&q.rateLimitedEnque, 1)
}
func (q *MockPriorityQueue[T]) AddAfter(item T, duration time.Duration) {
q.PriorityQueue.AddAfter(item, duration)
atomic.AddInt32(&q.addAfterEnque, 1)
q.wgLock.Lock()
defer q.wgLock.Unlock()
if q.addWG != nil {
q.addWG.Done()
}
}
func (q *MockPriorityQueue[T]) AddWithOpts(o priorityqueue.AddOpts, items ...T) {
q.PriorityQueue.AddWithOpts(o, items...)
q.wgLock.Lock()
defer q.wgLock.Unlock()
if q.addWG != nil {
for range items {
q.addWG.Done()
}
}
}
func (q *MockPriorityQueue[T]) GetRateLimitedEnqueueCount() int {
return int(atomic.LoadInt32(&q.rateLimitedEnque))
}
func (q *MockPriorityQueue[T]) GetAddAfterEnqueueCount() int {
return int(atomic.LoadInt32(&q.addAfterEnque))
}
// ExpectAdds sets the number of expected enqueues.
func (q *MockPriorityQueue[T]) ExpectAdds(diff int) {
q.wgLock.Lock()
defer q.wgLock.Unlock()
q.addWG = &sync.WaitGroup{}
q.addWG.Add(diff)
}
// Wait blocks until the number of adds set via ExpectAdds has happened.
// It will not block if no expectation was set.
func (q *MockPriorityQueue[T]) Wait() {
q.wgLock.Lock()
wg := q.addWG
q.wgLock.Unlock()
if wg != nil {
wg.Wait()
q.wgLock.Lock()
q.addWG = nil
q.wgLock.Unlock()
}
}
func NewMockPriorityQueue[T comparable](queue priorityqueue.PriorityQueue[T]) *MockPriorityQueue[T] {
return &MockPriorityQueue[T]{queue, nil, 0, 0, sync.Mutex{}}
}
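// Example (illustrative sketch): construction of the underlying
// priorityqueue.PriorityQueue is elided here; given such a queue pq with
// item type string, the synchronous test pattern looks like:
//
//	q := NewMockPriorityQueue(pq)
//	q.ExpectAdds(1)
//	q.Add("default/vmi-a")
//	q.Wait() // returns once the add has been observed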
package testutils
import (
"sync"
"sync/atomic"
"time"
k8sv1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/cache"
framework "k8s.io/client-go/tools/cache/testing"
"k8s.io/client-go/util/workqueue"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
)
/*
MockWorkQueue is a helper workqueue which can be wrapped around
any queue implementing TypedRateLimitingInterface. This allows synchronous
testing of the controller. The typical pattern is:

	MockQueue.ExpectAdds(3)
	vmiSource.Add(vmi)
	vmiSource.Add(vmi1)
	vmiSource.Add(vmi2)
	MockQueue.Wait()

This ensures that the Source callbacks listening on vmiSource have
enqueued an object three times. Since enqueuing is typically the last
action in listener callbacks, we can assume that the wanted scenario for
a controller is set up, and an execution will process this scenario.
*/
type MockWorkQueue[T comparable] struct {
workqueue.TypedRateLimitingInterface[T]
addWG *sync.WaitGroup
rateLimitedEnque int32
addAfterEnque int32
wgLock sync.Mutex
}
func (q *MockWorkQueue[T]) Add(item T) {
q.TypedRateLimitingInterface.Add(item)
q.wgLock.Lock()
defer q.wgLock.Unlock()
if q.addWG != nil {
q.addWG.Done()
}
}
func (q *MockWorkQueue[T]) AddRateLimited(item T) {
q.TypedRateLimitingInterface.AddRateLimited(item)
atomic.AddInt32(&q.rateLimitedEnque, 1)
}
func (q *MockWorkQueue[T]) AddAfter(item T, duration time.Duration) {
q.TypedRateLimitingInterface.AddAfter(item, duration)
atomic.AddInt32(&q.addAfterEnque, 1)
q.wgLock.Lock()
defer q.wgLock.Unlock()
if q.addWG != nil {
q.addWG.Done()
}
}
func (q *MockWorkQueue[T]) GetRateLimitedEnqueueCount() int {
return int(atomic.LoadInt32(&q.rateLimitedEnque))
}
func (q *MockWorkQueue[T]) GetAddAfterEnqueueCount() int {
return int(atomic.LoadInt32(&q.addAfterEnque))
}
// ExpectAdds sets the number of expected enqueues.
func (q *MockWorkQueue[T]) ExpectAdds(diff int) {
q.wgLock.Lock()
defer q.wgLock.Unlock()
q.addWG = &sync.WaitGroup{}
q.addWG.Add(diff)
}
// Wait blocks until the number of adds set via ExpectAdds has happened.
// It will not block if no expectation was set.
func (q *MockWorkQueue[T]) Wait() {
q.wgLock.Lock()
wg := q.addWG
q.wgLock.Unlock()
if wg != nil {
wg.Wait()
q.wgLock.Lock()
q.addWG = nil
q.wgLock.Unlock()
}
}
func NewMockWorkQueue[T comparable](queue workqueue.TypedRateLimitingInterface[T]) *MockWorkQueue[T] {
return &MockWorkQueue[T]{queue, nil, 0, 0, sync.Mutex{}}
}
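// Example (illustrative sketch): wrapping a typed rate-limiting workqueue so
// that a test can wait deterministically for enqueues. The rate limiter used
// below is just the client-go default; any TypedRateLimiter works.
//
//	base := workqueue.NewTypedRateLimitingQueue[string](
//		workqueue.DefaultTypedControllerRateLimiter[string]())
//	q := NewMockWorkQueue(base)
//	q.ExpectAdds(2)
//	q.Add("default/vmi-a")
//	q.Add("default/vmi-b")
//	q.Wait() // returns once both adds have been observed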
func NewFakeInformerFor(obj runtime.Object) (cache.SharedIndexInformer, *framework.FakeControllerSource) {
objSource := framework.NewFakeControllerSource()
objInformer := cache.NewSharedIndexInformer(objSource, obj, 0, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
return objInformer, objSource
}
func NewFakeInformerWithIndexersFor(obj runtime.Object, indexers cache.Indexers) (cache.SharedIndexInformer, *framework.FakeControllerSource) {
objSource := framework.NewFakeControllerSource()
objInformer := cache.NewSharedIndexInformer(objSource, obj, 0, indexers)
return objInformer, objSource
}
type VirtualMachineFeeder[T comparable] struct {
MockQueue *MockWorkQueue[T]
Source *framework.FakeControllerSource
}
func (v *VirtualMachineFeeder[T]) Add(vmi *v1.VirtualMachineInstance) {
v.MockQueue.ExpectAdds(1)
v.Source.Add(vmi)
v.MockQueue.Wait()
}
func (v *VirtualMachineFeeder[T]) Modify(vmi *v1.VirtualMachineInstance) {
v.MockQueue.ExpectAdds(1)
v.Source.Modify(vmi)
v.MockQueue.Wait()
}
func (v *VirtualMachineFeeder[T]) Delete(vmi *v1.VirtualMachineInstance) {
v.MockQueue.ExpectAdds(1)
v.Source.Delete(vmi)
v.MockQueue.Wait()
}
func NewVirtualMachineFeeder[T comparable](queue *MockWorkQueue[T], source *framework.FakeControllerSource) *VirtualMachineFeeder[T] {
return &VirtualMachineFeeder[T]{
MockQueue: queue,
Source: source,
}
}
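// Example (illustrative sketch): a feeder ties a MockWorkQueue to the fake
// controller source returned by NewFakeInformerFor, so each event is fully
// enqueued before the test continues. The mockQueue and vmi values are
// assumed to exist in the test.
//
//	_, source := NewFakeInformerFor(&v1.VirtualMachineInstance{})
//	feeder := NewVirtualMachineFeeder(mockQueue, source)
//	feeder.Add(vmi) // blocks until the add has been enqueued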
type PodFeeder[T comparable] struct {
MockQueue *MockWorkQueue[T]
Source *framework.FakeControllerSource
}
func (v *PodFeeder[T]) Add(pod *k8sv1.Pod) {
v.MockQueue.ExpectAdds(1)
v.Source.Add(pod)
v.MockQueue.Wait()
}
func (v *PodFeeder[T]) Modify(pod *k8sv1.Pod) {
v.MockQueue.ExpectAdds(1)
v.Source.Modify(pod)
v.MockQueue.Wait()
}
func (v *PodFeeder[T]) Delete(pod *k8sv1.Pod) {
v.MockQueue.ExpectAdds(1)
v.Source.Delete(pod)
v.MockQueue.Wait()
}
func NewPodFeeder[T comparable](queue *MockWorkQueue[T], source *framework.FakeControllerSource) *PodFeeder[T] {
return &PodFeeder[T]{
MockQueue: queue,
Source: source,
}
}
type PodDisruptionBudgetFeeder[T comparable] struct {
MockQueue *MockWorkQueue[T]
Source *framework.FakeControllerSource
}
func (v *PodDisruptionBudgetFeeder[T]) Add(pdb *policyv1.PodDisruptionBudget) {
v.MockQueue.ExpectAdds(1)
v.Source.Add(pdb)
v.MockQueue.Wait()
}
func (v *PodDisruptionBudgetFeeder[T]) Modify(pdb *policyv1.PodDisruptionBudget) {
v.MockQueue.ExpectAdds(1)
v.Source.Modify(pdb)
v.MockQueue.Wait()
}
func (v *PodDisruptionBudgetFeeder[T]) Delete(pdb *policyv1.PodDisruptionBudget) {
v.MockQueue.ExpectAdds(1)
v.Source.Delete(pdb)
v.MockQueue.Wait()
}
func NewPodDisruptionBudgetFeeder[T comparable](queue *MockWorkQueue[T], source *framework.FakeControllerSource) *PodDisruptionBudgetFeeder[T] {
return &PodDisruptionBudgetFeeder[T]{
MockQueue: queue,
Source: source,
}
}
type MigrationFeeder[T comparable] struct {
MockQueue *MockWorkQueue[T]
Source *framework.FakeControllerSource
}
func (v *MigrationFeeder[T]) Add(migration *v1.VirtualMachineInstanceMigration) {
v.MockQueue.ExpectAdds(1)
v.Source.Add(migration)
v.MockQueue.Wait()
}
func (v *MigrationFeeder[T]) Modify(migration *v1.VirtualMachineInstanceMigration) {
v.MockQueue.ExpectAdds(1)
v.Source.Modify(migration)
v.MockQueue.Wait()
}
func (v *MigrationFeeder[T]) Delete(migration *v1.VirtualMachineInstanceMigration) {
v.MockQueue.ExpectAdds(1)
v.Source.Delete(migration)
v.MockQueue.Wait()
}
func NewMigrationFeeder[T comparable](queue *MockWorkQueue[T], source *framework.FakeControllerSource) *MigrationFeeder[T] {
return &MigrationFeeder[T]{
MockQueue: queue,
Source: source,
}
}
type DomainFeeder[T comparable] struct {
MockQueue *MockWorkQueue[T]
Source *framework.FakeControllerSource
}
func (v *DomainFeeder[T]) Add(vmi *api.Domain) {
v.MockQueue.ExpectAdds(1)
v.Source.Add(vmi)
v.MockQueue.Wait()
}
func (v *DomainFeeder[T]) Modify(vmi *api.Domain) {
v.MockQueue.ExpectAdds(1)
v.Source.Modify(vmi)
v.MockQueue.Wait()
}
func (v *DomainFeeder[T]) Delete(vmi *api.Domain) {
v.MockQueue.ExpectAdds(1)
v.Source.Delete(vmi)
v.MockQueue.Wait()
}
func NewDomainFeeder[T comparable](queue *MockWorkQueue[T], source *framework.FakeControllerSource) *DomainFeeder[T] {
return &DomainFeeder[T]{
MockQueue: queue,
Source: source,
}
}
package tpm
import v1 "kubevirt.io/api/core/v1"
func HasDevice(vmiSpec *v1.VirtualMachineInstanceSpec) bool {
return vmiSpec.Domain.Devices.TPM != nil &&
(vmiSpec.Domain.Devices.TPM.Enabled == nil || *vmiSpec.Domain.Devices.TPM.Enabled)
}
func HasPersistentDevice(vmiSpec *v1.VirtualMachineInstanceSpec) bool {
return HasDevice(vmiSpec) &&
vmiSpec.Domain.Devices.TPM.Persistent != nil &&
*vmiSpec.Domain.Devices.TPM.Persistent
}
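// Example (illustrative sketch): a spec requesting a persistent vTPM, for
// which both helpers above return true.
//
//	spec := &v1.VirtualMachineInstanceSpec{}
//	persistent := true
//	spec.Domain.Devices.TPM = &v1.TPMDevice{Persistent: &persistent}
//	HasDevice(spec)           // true (a nil Enabled counts as enabled)
//	HasPersistentDevice(spec) // true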
package unsafepath
import "path/filepath"
type Path struct {
rootBase string
relativePath string
}
func New(rootBase string, relativePath string) *Path {
return &Path{
rootBase: rootBase,
relativePath: relativePath,
}
}
func UnsafeAbsolute(path *Path) string {
return filepath.Join(path.rootBase, path.relativePath)
}
func UnsafeRelative(path *Path) string {
return path.relativePath
}
func UnsafeRoot(path *Path) string {
return path.rootBase
}
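// Example (illustrative sketch): the "unsafe" accessors simply recombine or
// expose the stored components without any symlink or traversal checks.
//
//	p := New("/proc/1/root", "var/run/kubevirt")
//	UnsafeAbsolute(p) // "/proc/1/root/var/run/kubevirt"
//	UnsafeRelative(p) // "var/run/kubevirt"
//	UnsafeRoot(p)     // "/proc/1/root"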
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright the KubeVirt Authors.
*
*/
package util
import v1 "kubevirt.io/api/core/v1"
// IsSEVVMI checks if a VMI spec requests AMD SEV; it also returns true for SEV-SNP, which builds on SEV
func IsSEVVMI(vmi *v1.VirtualMachineInstance) bool {
if vmi.Spec.Domain.LaunchSecurity == nil {
return false
}
return vmi.Spec.Domain.LaunchSecurity.SEV != nil || vmi.Spec.Domain.LaunchSecurity.SNP != nil
}
// IsSEVESVMI checks if a VMI spec requests AMD SEV-ES, i.e. SEV with the encrypted-state policy bit set
func IsSEVESVMI(vmi *v1.VirtualMachineInstance) bool {
if vmi.Spec.Domain.LaunchSecurity == nil ||
vmi.Spec.Domain.LaunchSecurity.SEV == nil ||
vmi.Spec.Domain.LaunchSecurity.SEV.Policy == nil ||
vmi.Spec.Domain.LaunchSecurity.SEV.Policy.EncryptedState == nil {
return false
}
return *vmi.Spec.Domain.LaunchSecurity.SEV.Policy.EncryptedState
}
// Check if a VMI spec requests AMD SEV-SNP
func IsSEVSNPVMI(vmi *v1.VirtualMachineInstance) bool {
return vmi.Spec.Domain.LaunchSecurity != nil && vmi.Spec.Domain.LaunchSecurity.SNP != nil
}
// Check if a VMI spec requests SEV with attestation
func IsSEVAttestationRequested(vmi *v1.VirtualMachineInstance) bool {
if !IsSEVVMI(vmi) {
return false
}
// If SEV-SNP is requested, attestation is not applicable
if IsSEVSNPVMI(vmi) {
return false
}
// Check if SEV is configured before accessing Attestation
if vmi.Spec.Domain.LaunchSecurity.SEV == nil {
return false
}
return vmi.Spec.Domain.LaunchSecurity.SEV.Attestation != nil
}
// Check if a VMI spec requests Intel TDX
func IsTDXVMI(vmi *v1.VirtualMachineInstance) bool {
return vmi.Spec.Domain.LaunchSecurity != nil && vmi.Spec.Domain.LaunchSecurity.TDX != nil
}
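// Example (illustrative sketch): a VMI requesting plain SEV is detected by
// IsSEVVMI but not by IsSEVESVMI, which additionally requires the policy's
// EncryptedState bit.
//
//	vmi := &v1.VirtualMachineInstance{}
//	vmi.Spec.Domain.LaunchSecurity = &v1.LaunchSecurity{SEV: &v1.SEV{}}
//	IsSEVVMI(vmi)    // true
//	IsSEVESVMI(vmi)  // false
//	IsSEVSNPVMI(vmi) // false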
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package hardware
import (
"bytes"
"fmt"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
)
const (
PCI_ADDRESS_PATTERN = `^([\da-fA-F]{4}):([\da-fA-F]{2}):([\da-fA-F]{2})\.([0-7]{1})$`
)
// ParseCPUSetLine parses a Linux cpuset line into a slice of ints
// See: http://man7.org/linux/man-pages/man7/cpuset.7.html#FORMATS
func ParseCPUSetLine(cpusetLine string, limit int) (cpusList []int, err error) {
elements := strings.Split(cpusetLine, ",")
for _, item := range elements {
cpuRange := strings.Split(item, "-")
// provided a range: 1-3
if len(cpuRange) > 1 {
start, err := strconv.Atoi(cpuRange[0])
if err != nil {
return nil, err
}
end, err := strconv.Atoi(cpuRange[1])
if err != nil {
return nil, err
}
// Add cpus to the list. Assuming it's a valid range.
for cpuNum := start; cpuNum <= end; cpuNum++ {
if cpusList, err = safeAppend(cpusList, cpuNum, limit); err != nil {
return nil, err
}
}
} else {
cpuNum, err := strconv.Atoi(cpuRange[0])
if err != nil {
return nil, err
}
if cpusList, err = safeAppend(cpusList, cpuNum, limit); err != nil {
return nil, err
}
}
}
return
}
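// Example (illustrative sketch): ranges and single CPUs may be mixed on one
// line, as in /sys cpulist files.
//
//	cpus, err := ParseCPUSetLine("0-2,7", 100)
//	// cpus == []int{0, 1, 2, 7}, err == nil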
func safeAppend(cpusList []int, cpu int, limit int) ([]int, error) {
if len(cpusList) > limit {
return nil, fmt.Errorf("rejecting expanding CPU array for safety reasons, limit is %v", limit)
}
return append(cpusList, cpu), nil
}
// GetNumberOfVCPUs returns the number of vCPUs,
// computed as sockets*cores*threads; fields that are zero are ignored
func GetNumberOfVCPUs(cpuSpec *v1.CPU) int64 {
vCPUs := cpuSpec.Cores
if cpuSpec.Sockets != 0 {
if vCPUs == 0 {
vCPUs = cpuSpec.Sockets
} else {
vCPUs *= cpuSpec.Sockets
}
}
if cpuSpec.Threads != 0 {
if vCPUs == 0 {
vCPUs = cpuSpec.Threads
} else {
vCPUs *= cpuSpec.Threads
}
}
return int64(vCPUs)
}
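// Example (illustrative sketch): zero-valued fields are treated as absent
// rather than multiplying the result by zero.
//
//	GetNumberOfVCPUs(&v1.CPU{Sockets: 2, Cores: 4, Threads: 2}) // 16
//	GetNumberOfVCPUs(&v1.CPU{Cores: 4})                         // 4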
// ParsePciAddress returns the PCI DBSF fields (domain, bus, slot, function) as a slice of strings
func ParsePciAddress(pciAddress string) ([]string, error) {
pciAddrRegx, err := regexp.Compile(PCI_ADDRESS_PATTERN)
if err != nil {
return nil, fmt.Errorf("failed to compile pci address pattern, %v", err)
}
res := pciAddrRegx.FindStringSubmatch(pciAddress)
if len(res) == 0 {
return nil, fmt.Errorf("failed to parse pci address %s", pciAddress)
}
return res[1:], nil
}
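// Example (illustrative sketch): a full DBSF address is split into its four
// fields; anything not matching the pattern yields an error.
//
//	fields, err := ParsePciAddress("0000:3a:00.0")
//	// fields == []string{"0000", "3a", "00", "0"}, err == nil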
func GetDeviceNumaNode(pciAddress string) (*uint32, error) {
pciBasePath := "/sys/bus/pci/devices"
numaNodePath := filepath.Join(pciBasePath, pciAddress, "numa_node")
// #nosec No risk for path injection. Reading static path of NUMA node info
numaNodeStr, err := os.ReadFile(numaNodePath)
if err != nil {
return nil, err
}
numaNodeStr = bytes.TrimSpace(numaNodeStr)
numaNodeInt, err := strconv.Atoi(string(numaNodeStr))
if err != nil {
return nil, err
}
numaNode := uint32(numaNodeInt)
return &numaNode, nil
}
func GetDeviceAlignedCPUs(pciAddress string) ([]int, error) {
numaNode, err := GetDeviceNumaNode(pciAddress)
if err != nil {
return nil, err
}
cpuList, err := GetNumaNodeCPUList(int(*numaNode))
if err != nil {
return nil, err
}
return cpuList, err
}
func GetNumaNodeCPUList(numaNode int) ([]int, error) {
filePath := fmt.Sprintf("/sys/bus/node/devices/node%d/cpulist", numaNode)
content, err := os.ReadFile(filePath)
if err != nil {
return nil, err
}
content = bytes.TrimSpace(content)
cpusList, err := ParseCPUSetLine(string(content), 50000)
if err != nil {
return nil, fmt.Errorf("failed to parse cpulist file: %v", err)
}
return cpusList, nil
}
func LookupDeviceVCPUAffinity(pciAddress string, domainSpec *api.DomainSpec) ([]uint32, error) {
alignedVCPUList := []uint32{}
p2vCPUMap := make(map[string]uint32)
alignedPhysicalCPUs, err := GetDeviceAlignedCPUs(pciAddress)
if err != nil {
return nil, err
}
// make sure that the VMI has cpus from this numa node.
cpuTune := domainSpec.CPUTune.VCPUPin
for _, vcpuPin := range cpuTune {
p2vCPUMap[vcpuPin.CPUSet] = vcpuPin.VCPU
}
for _, pcpu := range alignedPhysicalCPUs {
if vCPU, exist := p2vCPUMap[strconv.Itoa(int(pcpu))]; exist {
alignedVCPUList = append(alignedVCPUList, uint32(vCPU))
}
}
return alignedVCPUList, nil
}
package lookup
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
)
func VirtualMachinesOnNode(cli kubecli.KubevirtClient, nodeName string) ([]*virtv1.VirtualMachineInstance, error) {
labelSelector, err := labels.Parse(fmt.Sprintf("%s in (%s)", virtv1.NodeNameLabel, nodeName))
if err != nil {
return nil, err
}
list, err := cli.VirtualMachineInstance(v1.NamespaceAll).List(context.Background(), metav1.ListOptions{
LabelSelector: labelSelector.String(),
})
if err != nil {
return nil, err
}
vmis := []*virtv1.VirtualMachineInstance{}
for i := range list.Items {
vmis = append(vmis, &list.Items[i])
}
return vmis, nil
}
func ActiveVirtualMachinesOnNode(cli kubecli.KubevirtClient, nodeName string) ([]*virtv1.VirtualMachineInstance, error) {
vmis, err := VirtualMachinesOnNode(cli, nodeName)
if err != nil {
return nil, err
}
activeVMIs := []*virtv1.VirtualMachineInstance{}
for _, vmi := range vmis {
if !vmi.IsRunning() && !vmi.IsScheduled() {
continue
}
activeVMIs = append(activeVMIs, vmi)
}
return activeVMIs, nil
}
package migrations
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/controller"
"kubevirt.io/kubevirt/pkg/pointer"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
const CancelMigrationFailedVmiNotMigratingErr = "failed to cancel migration - vmi is not migrating"
const (
QueuePriorityRunning int = 1000
QueuePrioritySystemCritical int = 100
QueuePriorityUserTriggered int = 50
QueuePrioritySystemMaintenance int = 20
QueuePriorityDefault int = 0
QueuePriorityPending int = -100
)
func ListUnfinishedMigrations(indexer cache.Indexer) []*v1.VirtualMachineInstanceMigration {
objs, err := indexer.ByIndex(controller.UnfinishedIndex, controller.UnfinishedIndex)
if err != nil {
log.Log.Reason(err).Errorf("Failed to use unfinished index")
return nil
}
var migrations []*v1.VirtualMachineInstanceMigration
for _, obj := range objs {
migration := obj.(*v1.VirtualMachineInstanceMigration)
if !migration.IsFinal() {
migrations = append(migrations, migration)
}
}
return migrations
}
func ListWorkloadUpdateMigrations(indexer cache.Indexer, vmiName, ns string) []v1.VirtualMachineInstanceMigration {
objs, err := indexer.ByIndex(controller.ByVMINameIndex, fmt.Sprintf("%s/%s", ns, vmiName))
if err != nil {
log.Log.Reason(err).Errorf("Failed to use byVMIName index for workload migrations")
return nil
}
migrations := []v1.VirtualMachineInstanceMigration{}
for _, obj := range objs {
migration := obj.(*v1.VirtualMachineInstanceMigration)
if migration.IsFinal() {
continue
}
if !metav1.HasAnnotation(migration.ObjectMeta, v1.WorkloadUpdateMigrationAnnotation) {
continue
}
migrations = append(migrations, *migration)
}
return migrations
}
func FilterRunningMigrations(migrations []*v1.VirtualMachineInstanceMigration) []*v1.VirtualMachineInstanceMigration {
runningMigrations := []*v1.VirtualMachineInstanceMigration{}
for _, migration := range migrations {
if migration.IsRunning() {
runningMigrations = append(runningMigrations, migration)
}
}
return runningMigrations
}
// IsMigrating returns true if a given VMI is still migrating and false otherwise.
func IsMigrating(vmi *v1.VirtualMachineInstance) bool {
if vmi == nil {
log.Log.V(4).Infof("checking if VMI is migrating, but it is nil")
return false
}
now := metav1.Now()
running := false
if vmi.Status.MigrationState != nil {
start := vmi.Status.MigrationState.StartTimestamp
stop := vmi.Status.MigrationState.EndTimestamp
if start != nil && (now.After(start.Time) || now.Equal(start)) {
running = true
}
if stop != nil && (now.After(stop.Time) || now.Equal(stop)) {
running = false
}
}
return running
}
func MigrationFailed(vmi *v1.VirtualMachineInstance) bool {
return vmi.Status.MigrationState != nil && vmi.Status.MigrationState.Failed
}
func VMIEvictionStrategy(clusterConfig *virtconfig.ClusterConfig, vmi *v1.VirtualMachineInstance) *v1.EvictionStrategy {
if vmi != nil && vmi.Spec.EvictionStrategy != nil {
return vmi.Spec.EvictionStrategy
}
clusterStrategy := clusterConfig.GetConfig().EvictionStrategy
return clusterStrategy
}
func VMIMigratableOnEviction(clusterConfig *virtconfig.ClusterConfig, vmi *v1.VirtualMachineInstance) bool {
strategy := VMIEvictionStrategy(clusterConfig, vmi)
if strategy == nil {
return false
}
switch *strategy {
case v1.EvictionStrategyLiveMigrate:
return true
case v1.EvictionStrategyLiveMigrateIfPossible:
return vmi.IsMigratable()
}
return false
}
func ActiveMigrationExistsForVMI(migrationIndexer cache.Indexer, vmi *v1.VirtualMachineInstance) (bool, error) {
objs, err := migrationIndexer.ByIndex(controller.ByVMINameIndex, fmt.Sprintf("%s/%s", vmi.Namespace, vmi.Name))
if err != nil {
return false, err
}
for _, obj := range objs {
migration := obj.(*v1.VirtualMachineInstanceMigration)
if migration.IsRunning() {
return true, nil
}
}
return false, nil
}
func PriorityFromMigration(migration *v1.VirtualMachineInstanceMigration) *int {
if migration.Spec.Priority == nil {
return pointer.P(QueuePriorityDefault)
}
switch *migration.Spec.Priority {
case v1.PrioritySystemCritical:
return pointer.P(QueuePrioritySystemCritical)
case v1.PriorityUserTriggered:
return pointer.P(QueuePriorityUserTriggered)
case v1.PrioritySystemMaintenance:
return pointer.P(QueuePrioritySystemMaintenance)
default:
return pointer.P(QueuePriorityDefault)
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package dns
import (
"strings"
"k8s.io/apimachinery/pkg/util/validation"
v1 "kubevirt.io/api/core/v1"
)
// SanitizeHostname sanitizes a hostname according to DNS label rules.
// If the hostname is taken from vmi.Spec.Hostname,
// it has already passed DNS label validation.
func SanitizeHostname(vmi *v1.VirtualMachineInstance) string {
hostName := strings.Split(vmi.Name, ".")[0]
if len(hostName) > validation.DNS1123LabelMaxLength {
hostName = hostName[:validation.DNS1123LabelMaxLength]
}
if vmi.Spec.Hostname != "" {
hostName = vmi.Spec.Hostname
}
return hostName
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package grpc
import (
"context"
"net"
"os"
"path/filepath"
"time"
"google.golang.org/grpc"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/util"
)
const (
CONNECT_TIMEOUT_SECONDS = 2
)
func DialSocket(socketPath string) (*grpc.ClientConn, error) {
return DialSocketWithTimeout(socketPath, 0)
}
func DialSocketWithTimeout(socketPath string, timeout int) (*grpc.ClientConn, error) {
options := []grpc.DialOption{
grpc.WithAuthority("localhost"),
grpc.WithInsecure(),
grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
return net.DialTimeout("unix", addr, timeout)
}),
grpc.WithBlock(), // dial sync in order to catch errors early
}
if timeout > 0 {
options = append(options,
grpc.WithTimeout(time.Duration(timeout+CONNECT_TIMEOUT_SECONDS)*time.Second),
)
}
// Combined with the Block option, this context controls how long to wait for establishing the connection.
// The dial timeout used above controls the overall duration of the connection (including RPC calls).
ctx, cancel := context.WithTimeout(context.Background(), CONNECT_TIMEOUT_SECONDS*time.Second)
defer cancel()
return grpc.DialContext(ctx, socketPath, options...)
}
func CreateSocket(socketPath string) (net.Listener, error) {
os.RemoveAll(socketPath)
err := util.MkdirAllWithNosec(filepath.Dir(socketPath))
if err != nil {
log.Log.Reason(err).Errorf("unable to create directory for unix socket %v", socketPath)
return nil, err
}
socket, err := net.Listen("unix", socketPath)
if err != nil {
log.Log.Reason(err).Errorf("failed to create unix socket %v", socketPath)
return nil, err
}
return socket, nil
}
package nodes
import (
"context"
"encoding/json"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"kubevirt.io/client-go/kubecli"
)
func PatchNode(client kubecli.KubevirtClient, original, modified *corev1.Node) error {
originalBytes, err := json.Marshal(original)
if err != nil {
return fmt.Errorf("could not serialize original object: %v", err)
}
modifiedBytes, err := json.Marshal(modified)
if err != nil {
return fmt.Errorf("could not serialize modified object: %v", err)
}
patch, err := strategicpatch.CreateTwoWayMergePatch(originalBytes, modifiedBytes, corev1.Node{})
if err != nil {
return fmt.Errorf("could not create merge patch: %v", err)
}
if _, err := client.CoreV1().Nodes().Patch(context.Background(), original.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}); err != nil {
return fmt.Errorf("could not patch the node: %v", err)
}
return nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package util
import (
"fmt"
"io"
"os"
"kubevirt.io/client-go/log"
)
// CloseIOAndCheckErr closes the closer and checks the returned error.
// If there was an error, a log message will be printed.
// If a valid address (not nil) is passed in err, the function will also update the error.
// Note: to update the error, the calling function needs to use a named return variable (if called as a deferred function).
func CloseIOAndCheckErr(c io.Closer, err *error) {
if ferr := c.Close(); ferr != nil {
log.DefaultLogger().Reason(ferr).Error("Error when closing file")
// Update the calling error only in case there wasn't a different error already
if err != nil && *err == nil {
*err = ferr
}
}
}
// The following helper functions wrap nosec annotations around os file functions that potentially assign files or directories
// access permissions that are viewed as not secure by gosec. Since kubevirt functionality and many e2e tests rely on such
// "unsafe" permission settings, e.g. the paths shared between virt-launcher and QEMU, the use of these functions avoids
// having too many nosec annotations scattered in the code and refers back to the places where the "unsafe" permissions are set.
func MkdirAllWithNosec(pathName string) error {
// #nosec G301, Expect directory permissions to be 0750 or less
return os.MkdirAll(pathName, 0755)
}
func OpenFileWithNosec(pathName string, flag int) (*os.File, error) {
// #nosec G304 G302, Expect file permissions to be 0600 or less
return os.OpenFile(pathName, flag, 0644)
}
func WriteFileWithNosec(pathName string, data []byte) error {
// #nosec G306, Expect WriteFile permissions to be 0600 or less
return os.WriteFile(pathName, data, 0644)
}
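// WriteBytes writes n bytes of value c to f in 4KiB chunks and returns an
// error if fewer than n bytes were written.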
func WriteBytes(f *os.File, c byte, n int64) error {
var err error
var i, total int64
buf := make([]byte, 1<<12)
for i = 0; i < 1<<12; i++ {
buf[i] = c
}
for i = 0; i < n>>12; i++ {
x, err := f.Write(buf)
total += int64(x)
if err != nil {
return err
}
}
x, err := f.Write(buf[:n&(1<<12-1)])
total += int64(x)
if err != nil {
return err
}
if total != n {
return fmt.Errorf("wrote %d bytes instead of %d", total, n)
}
return nil
}
package pdbs
import (
policyv1 "k8s.io/api/policy/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
virtv1 "kubevirt.io/api/core/v1"
)
func PDBsForVMI(vmi *virtv1.VirtualMachineInstance, pdbIndexer cache.Indexer) ([]*policyv1.PodDisruptionBudget, error) {
pbds, err := pdbIndexer.ByIndex(cache.NamespaceIndex, vmi.Namespace)
if err != nil {
return nil, err
}
pdbs := []*policyv1.PodDisruptionBudget{}
for _, pdb := range pbds {
p := v1.GetControllerOf(pdb.(*policyv1.PodDisruptionBudget))
if p != nil && p.Kind == virtv1.VirtualMachineInstanceGroupVersionKind.Kind &&
p.Name == vmi.Name {
pdbs = append(pdbs, pdb.(*policyv1.PodDisruptionBudget))
}
}
return pdbs, nil
}
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright the KubeVirt Authors.
*
*/
package util
import v1 "kubevirt.io/api/core/v1"
// Check if a VMI spec requests Secure Execution
func IsSecureExecutionVMI(vmi *v1.VirtualMachineInstance) bool {
return vmi.Spec.Domain.LaunchSecurity != nil && vmi.Spec.Architecture == "s390x"
}
package trace
import (
"sync"
"time"
"k8s.io/utils/trace"
)
type Tracer struct {
traceMap map[string]*trace.Trace
mux sync.Mutex
Threshold time.Duration
}
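// Example (illustrative sketch): traces are keyed, so concurrent reconcile
// loops can each carry their own trace. Output is only logged when the total
// duration exceeds Threshold.
//
//	t := &Tracer{Threshold: time.Second}
//	t.StartTrace("default/vmi-a", "sync VMI")
//	t.StepTrace("default/vmi-a", "updated status")
//	t.StopTrace("default/vmi-a")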
func (t *Tracer) StartTrace(key string, name string, field ...trace.Field) {
t.mux.Lock()
defer t.mux.Unlock()
if t.traceMap == nil {
t.traceMap = make(map[string]*trace.Trace)
}
t.traceMap[key] = trace.New(name, field...)
}
func (t *Tracer) StopTrace(key string) {
if key == "" {
return
}
t.mux.Lock()
defer t.mux.Unlock()
if _, ok := t.traceMap[key]; !ok {
return
}
t.traceMap[key].LogIfLong(t.Threshold)
delete(t.traceMap, key)
}
// StepTrace adds a new step with a specific message to an existing trace.
// Call it after an execution step to record how long it took.
func (t *Tracer) StepTrace(key string, name string, field ...trace.Field) {
// Trace shouldn't be making noise unless the Trace is slow.
// Fail silently on errors like empty or incorrect keys.
if key == "" {
return
}
t.mux.Lock()
defer t.mux.Unlock()
if _, ok := t.traceMap[key]; !ok {
return
}
t.traceMap[key].Step(name, field...)
}
package util
import (
"crypto/rand"
"fmt"
"math/big"
"strings"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
v1 "kubevirt.io/api/core/v1"
generatedscheme "kubevirt.io/client-go/kubevirt/scheme"
"kubevirt.io/client-go/log"
)
const (
ExtensionAPIServerAuthenticationConfigMap = "extension-apiserver-authentication"
RequestHeaderClientCAFileKey = "requestheader-client-ca-file"
VirtShareDir = "/var/run/kubevirt"
VirtImageVolumeDir = "/var/run/kubevirt-image-volume"
VirtKernelBootVolumeDir = "/var/run/kubevirt-kernel-boot"
VirtPrivateDir = "/var/run/kubevirt-private"
KubeletRoot = "/var/lib/kubelet"
KubeletPodsDir = KubeletRoot + "/pods"
HostRootMount = "/proc/1/root/"
ContainerBinary = "/container-disk-binary"
NonRootUID = 107
NonRootUserString = "qemu"
RootUser = 0
)
func IsNonRootVMI(vmi *v1.VirtualMachineInstance) bool {
_, ok := vmi.Annotations[v1.DeprecatedNonRootVMIAnnotation]
nonRoot := vmi.Status.RuntimeUser != 0
return ok || nonRoot
}
func isSRIOVVmi(vmi *v1.VirtualMachineInstance) bool {
for _, iface := range vmi.Spec.Domain.Devices.Interfaces {
if iface.SRIOV != nil {
return true
}
}
return false
}
// Check if a VMI spec requests a GPU
func IsGPUVMI(vmi *v1.VirtualMachineInstance) bool {
return len(vmi.Spec.Domain.Devices.GPUs) != 0
}
// Check if a VMI spec requests VirtIO-FS
func IsVMIVirtiofsEnabled(vmi *v1.VirtualMachineInstance) bool {
if vmi.Spec.Domain.Devices.Filesystems != nil {
for _, fs := range vmi.Spec.Domain.Devices.Filesystems {
if fs.Virtiofs != nil {
return true
}
}
}
return false
}
// Check if a VMI spec requests a HostDevice
func IsHostDevVMI(vmi *v1.VirtualMachineInstance) bool {
return len(vmi.Spec.Domain.Devices.HostDevices) != 0
}
// Check if a VMI spec requests a VFIO device
func IsVFIOVMI(vmi *v1.VirtualMachineInstance) bool {
return IsHostDevVMI(vmi) || IsGPUVMI(vmi) || isSRIOVVmi(vmi)
}
func UseLaunchSecurity(vmi *v1.VirtualMachineInstance) bool {
return IsSEVVMI(vmi) || IsSecureExecutionVMI(vmi) || IsTDXVMI(vmi)
}
func IsAutoAttachVSOCK(vmi *v1.VirtualMachineInstance) bool {
return vmi.Spec.Domain.Devices.AutoattachVSOCK != nil && *vmi.Spec.Domain.Devices.AutoattachVSOCK
}
func ResourceNameToEnvVar(prefix string, resourceName string) string {
varName := strings.ToUpper(resourceName)
varName = strings.Replace(varName, "/", "_", -1)
varName = strings.Replace(varName, ".", "_", -1)
return fmt.Sprintf("%s_%s", prefix, varName)
}
// HasKernelBootContainerImage checks whether kernel boot is defined in a valid way
func HasKernelBootContainerImage(vmi *v1.VirtualMachineInstance) bool {
if vmi == nil {
return false
}
vmiFirmware := vmi.Spec.Domain.Firmware
if (vmiFirmware == nil) || (vmiFirmware.KernelBoot == nil) || (vmiFirmware.KernelBoot.Container == nil) {
return false
}
return true
}
// AlignImageSizeTo1MiB rounds down the size to the nearest multiple of 1MiB
// A warning or an error may get logged
// The caller is responsible for ensuring the rounded-down size is not 0
func AlignImageSizeTo1MiB(size int64, logger *log.FilteredLogger) int64 {
remainder := size % (1024 * 1024)
if remainder == 0 {
return size
} else {
newSize := size - remainder
if logger != nil {
if newSize == 0 {
logger.Errorf("disks must be at least 1MiB, %d bytes is too small", size)
} else {
logger.V(4).Infof("disk size is not 1MiB-aligned. Adjusting from %d down to %d.", size, newSize)
}
}
return newSize
}
}
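// Example (illustrative sketch): a size just above 3MiB is rounded down to
// the 1MiB boundary; sizes below 1MiB collapse to 0 and must be rejected by
// the caller.
//
//	AlignImageSizeTo1MiB(3*1024*1024+512, nil) // 3145728 (3 MiB)
//	AlignImageSizeTo1MiB(1024, nil)            // 0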
func SetDefaultVolumeDisk(spec *v1.VirtualMachineInstanceSpec) {
diskAndFilesystemNames := make(map[string]struct{})
for _, disk := range spec.Domain.Devices.Disks {
diskAndFilesystemNames[disk.Name] = struct{}{}
}
for _, fs := range spec.Domain.Devices.Filesystems {
diskAndFilesystemNames[fs.Name] = struct{}{}
}
for _, volume := range spec.Volumes {
if _, foundDisk := diskAndFilesystemNames[volume.Name]; !foundDisk {
spec.Domain.Devices.Disks = append(
spec.Domain.Devices.Disks,
v1.Disk{
Name: volume.Name,
},
)
}
}
}
func CalcExpectedMemoryDumpSize(vmi *v1.VirtualMachineInstance) *resource.Quantity {
const memoryDumpOverhead = 100 * 1024 * 1024
domain := vmi.Spec.Domain
vmiMemoryReq := domain.Resources.Requests.Memory()
expectedPvcSize := resource.NewQuantity(int64(memoryDumpOverhead), vmiMemoryReq.Format)
expectedPvcSize.Add(*vmiMemoryReq)
return expectedPvcSize
}
// GenerateVMExportToken creates a cryptographically secure token for VM export
func GenerateVMExportToken() (string, error) {
const alphanums = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
const tokenLen = 20
ret := make([]byte, tokenLen)
for i := range ret {
num, err := rand.Int(rand.Reader, big.NewInt(int64(len(alphanums))))
if err != nil {
return "", err
}
ret[i] = alphanums[num.Int64()]
}
return string(ret), nil
}
// GenerateKubeVirtGroupVersionKind ensures a provided object registered with KubeVirt's generated scheme
// has its GroupVersionKind set correctly. This is required as client-go continues to return objects without
// TypeMeta set, as described in the following issue: https://github.com/kubernetes/client-go/issues/413
func GenerateKubeVirtGroupVersionKind(obj runtime.Object) (runtime.Object, error) {
objCopy := obj.DeepCopyObject()
gvks, _, err := generatedscheme.Scheme.ObjectKinds(objCopy)
if err != nil {
return nil, fmt.Errorf("could not get GroupVersionKind for object: %w", err)
}
objCopy.GetObjectKind().SetGroupVersionKind(gvks[0])
return objCopy, nil
}
package validating_webhooks
import (
"context"
"encoding/json"
"fmt"
"net/http"
admissionv1 "k8s.io/api/admission/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"kubevirt.io/kubevirt/pkg/util/webhooks"
"kubevirt.io/client-go/log"
)
type admitter interface {
Admit(context.Context, *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse
}
func NewPassingAdmissionResponse() *admissionv1.AdmissionResponse {
return &admissionv1.AdmissionResponse{Allowed: true}
}
func NewAdmissionResponse(causes []v1.StatusCause) *admissionv1.AdmissionResponse {
if len(causes) == 0 {
return NewPassingAdmissionResponse()
}
globalMessage := ""
for _, cause := range causes {
if globalMessage == "" {
globalMessage = cause.Message
} else {
globalMessage = fmt.Sprintf("%s, %s", globalMessage, cause.Message)
}
}
return &admissionv1.AdmissionResponse{
Result: &v1.Status{
Message: globalMessage,
Reason: v1.StatusReasonInvalid,
Code: http.StatusUnprocessableEntity,
Details: &v1.StatusDetails{
Causes: causes,
},
},
}
}
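// Example (illustrative sketch): causes are concatenated into one message and
// returned with HTTP 422.
//
//	causes := []v1.StatusCause{{Message: "spec.domain is required"}}
//	resp := NewAdmissionResponse(causes)
//	// resp.Result.Code == 422, resp.Result.Message == "spec.domain is required"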
func Serve(resp http.ResponseWriter, req *http.Request, admitter admitter) {
review, err := webhooks.GetAdmissionReview(req)
if err != nil {
resp.WriteHeader(http.StatusBadRequest)
return
}
response := admissionv1.AdmissionReview{
TypeMeta: v1.TypeMeta{
// match the request version to be
// backwards compatible with v1beta1
APIVersion: review.APIVersion,
Kind: "AdmissionReview",
},
}
reviewResponse := admitter.Admit(req.Context(), review)
if reviewResponse != nil {
response.Response = reviewResponse
response.Response.UID = review.Request.UID
}
// reset the Object and OldObject; they are not needed in the admitter response.
review.Request.Object = runtime.RawExtension{}
review.Request.OldObject = runtime.RawExtension{}
responseBytes, err := json.Marshal(response)
if err != nil {
log.Log.Reason(err).Errorf("failed json encode webhook response")
resp.WriteHeader(http.StatusBadRequest)
return
}
if _, err := resp.Write(responseBytes); err != nil {
log.Log.Reason(err).Errorf("failed to write webhook response")
resp.WriteHeader(http.StatusBadRequest)
return
}
}
package webhooks
import (
"encoding/json"
"fmt"
"io"
"net/http"
admissionv1 "k8s.io/api/admission/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
v12 "kubevirt.io/api/core/v1"
instancetypev1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/virt-api/definitions"
"kubevirt.io/kubevirt/pkg/virt-api/webhooks"
)
// GetAdmissionReview extracts an AdmissionReview object from an incoming HTTP request
func GetAdmissionReview(r *http.Request) (*admissionv1.AdmissionReview, error) {
var body []byte
if r.Body != nil {
if data, err := io.ReadAll(r.Body); err == nil {
body = data
}
}
// verify the content type is accurate
contentType := r.Header.Get("Content-Type")
if contentType != "application/json" {
return nil, fmt.Errorf("contentType=%s, expect application/json", contentType)
}
ar := &admissionv1.AdmissionReview{}
err := json.Unmarshal(body, ar)
return ar, err
}
// ToAdmissionResponseError converts a generic error into an AdmissionResponse that rejects the request
func ToAdmissionResponseError(err error) *admissionv1.AdmissionResponse {
log.Log.Reason(err).Error("admission generic error")
return &admissionv1.AdmissionResponse{
Result: &v1.Status{
Message: err.Error(),
Code: http.StatusBadRequest,
},
}
}
func ToAdmissionResponse(causes []v1.StatusCause) *admissionv1.AdmissionResponse {
log.Log.Infof("rejected vmi admission")
causeLen := len(causes)
lenDiff := 0
if causeLen > 10 {
causeLen = 10
lenDiff = len(causes) - 10
}
globalMessage := ""
for _, cause := range causes[:causeLen] {
if globalMessage == "" {
globalMessage = cause.Message
} else {
globalMessage = fmt.Sprintf("%s, %s", globalMessage, cause.Message)
}
}
if lenDiff > 0 {
globalMessage = fmt.Sprintf("%s, and %v more validation errors", globalMessage, lenDiff)
}
return &admissionv1.AdmissionResponse{
Result: &v1.Status{
Message: globalMessage,
Reason: v1.StatusReasonInvalid,
Code: http.StatusUnprocessableEntity,
Details: &v1.StatusDetails{
Causes: causes[:causeLen],
},
},
}
}
func ValidationErrorsToAdmissionResponse(errs []error) *admissionv1.AdmissionResponse {
var causes []v1.StatusCause
for _, e := range errs {
causes = append(causes,
v1.StatusCause{
Message: e.Error(),
},
)
}
return ToAdmissionResponse(causes)
}
func ValidateSchema(gvk schema.GroupVersionKind, data []byte) *admissionv1.AdmissionResponse {
in := map[string]interface{}{}
err := json.Unmarshal(data, &in)
if err != nil {
return ToAdmissionResponseError(err)
}
errs := definitions.Validator.Validate(gvk, in)
if len(errs) > 0 {
return ValidationErrorsToAdmissionResponse(errs)
}
return nil
}
func ValidateRequestResource(request v1.GroupVersionResource, group string, resource string) bool {
gvr := v1.GroupVersionResource{Group: group, Resource: resource}
for _, version := range v12.ApiSupportedWebhookVersions {
gvr.Version = version
if gvr == request {
return true
}
}
return false
}
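// Example (illustrative sketch): the request matches when group and resource
// agree and the request's version is one of ApiSupportedWebhookVersions.
//
//	req := v1.GroupVersionResource{Group: "kubevirt.io", Version: "v1", Resource: "virtualmachineinstances"}
//	ValidateRequestResource(req, "kubevirt.io", "virtualmachineinstances") // true, assuming "v1" is supported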
func ValidateStatus(data []byte) *admissionv1.AdmissionResponse {
in := map[string]interface{}{}
err := json.Unmarshal(data, &in)
if err != nil {
return ToAdmissionResponseError(err)
}
obj := unstructured.Unstructured{Object: in}
gvk := obj.GroupVersionKind()
if gvk.Kind == "" {
return ValidationErrorsToAdmissionResponse([]error{fmt.Errorf("could not determine object kind")})
}
errs := definitions.Validator.ValidateStatus(gvk, in)
if len(errs) > 0 {
return ValidationErrorsToAdmissionResponse(errs)
}
return nil
}
func GetVMIFromAdmissionReview(ar *admissionv1.AdmissionReview) (new *v12.VirtualMachineInstance, old *v12.VirtualMachineInstance, err error) {
if !ValidateRequestResource(ar.Request.Resource, webhooks.VirtualMachineInstanceGroupVersionResource.Group, webhooks.VirtualMachineInstanceGroupVersionResource.Resource) {
return nil, nil, fmt.Errorf("expect resource to be '%s'", webhooks.VirtualMachineInstanceGroupVersionResource.Resource)
}
raw := ar.Request.Object.Raw
newVMI := v12.VirtualMachineInstance{}
err = json.Unmarshal(raw, &newVMI)
if err != nil {
return nil, nil, err
}
if ar.Request.Operation == admissionv1.Update {
raw := ar.Request.OldObject.Raw
oldVMI := v12.VirtualMachineInstance{}
err = json.Unmarshal(raw, &oldVMI)
if err != nil {
return nil, nil, err
}
return &newVMI, &oldVMI, nil
}
return &newVMI, nil, nil
}
func GetVMFromAdmissionReview(ar *admissionv1.AdmissionReview) (new *v12.VirtualMachine, old *v12.VirtualMachine, err error) {
if !ValidateRequestResource(ar.Request.Resource, webhooks.VirtualMachineGroupVersionResource.Group, webhooks.VirtualMachineGroupVersionResource.Resource) {
return nil, nil, fmt.Errorf("expect resource to be '%s'", webhooks.VirtualMachineGroupVersionResource.Resource)
}
raw := ar.Request.Object.Raw
newVM := v12.VirtualMachine{}
err = json.Unmarshal(raw, &newVM)
if err != nil {
return nil, nil, err
}
if ar.Request.Operation == admissionv1.Update {
raw := ar.Request.OldObject.Raw
oldVM := v12.VirtualMachine{}
err = json.Unmarshal(raw, &oldVM)
if err != nil {
return nil, nil, err
}
return &newVM, &oldVM, nil
}
return &newVM, nil, nil
}
func GetInstanceTypeSpecFromAdmissionRequest(request *admissionv1.AdmissionRequest) (new *instancetypev1beta1.VirtualMachineInstancetypeSpec, old *instancetypev1beta1.VirtualMachineInstancetypeSpec, err error) {
raw := request.Object.Raw
instancetypeObj := instancetypev1beta1.VirtualMachineInstancetype{}
err = json.Unmarshal(raw, &instancetypeObj)
if err != nil {
return nil, nil, err
}
if request.Operation == admissionv1.Update {
raw := request.OldObject.Raw
oldInstancetypeObj := instancetypev1beta1.VirtualMachineInstancetype{}
err = json.Unmarshal(raw, &oldInstancetypeObj)
if err != nil {
return nil, nil, err
}
return &instancetypeObj.Spec, &oldInstancetypeObj.Spec, nil
}
return &instancetypeObj.Spec, nil, nil
}
func GetPreferenceSpecFromAdmissionRequest(request *admissionv1.AdmissionRequest) (new *instancetypev1beta1.VirtualMachinePreferenceSpec, old *instancetypev1beta1.VirtualMachinePreferenceSpec, err error) {
raw := request.Object.Raw
preferenceObj := instancetypev1beta1.VirtualMachinePreference{}
err = json.Unmarshal(raw, &preferenceObj)
if err != nil {
return nil, nil, err
}
if request.Operation == admissionv1.Update {
raw := request.OldObject.Raw
oldPreferenceObj := instancetypev1beta1.VirtualMachinePreference{}
err = json.Unmarshal(raw, &oldPreferenceObj)
if err != nil {
return nil, nil, err
}
return &preferenceObj.Spec, &oldPreferenceObj.Spec, nil
}
return &preferenceObj.Spec, nil, nil
}
func GetVMIMFromAdmissionRequest(ar *admissionv1.AdmissionReview) (new *v12.VirtualMachineInstanceMigration, old *v12.VirtualMachineInstanceMigration, err error) {
raw := ar.Request.Object.Raw
migrationObj := v12.VirtualMachineInstanceMigration{}
err = json.Unmarshal(raw, &migrationObj)
if err != nil {
return nil, nil, err
}
if ar.Request.Operation == admissionv1.Update {
raw := ar.Request.OldObject.Raw
oldMigrationObj := v12.VirtualMachineInstanceMigration{}
err = json.Unmarshal(raw, &oldMigrationObj)
if err != nil {
return nil, nil, err
}
return &migrationObj, &oldMigrationObj, nil
}
return &migrationObj, nil, nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package definitions
import (
"fmt"
"net/http"
"path"
"reflect"
clonebase "kubevirt.io/api/clone"
clone "kubevirt.io/api/clone/v1beta1"
"kubevirt.io/api/instancetype"
"kubevirt.io/api/migrations"
migrationsv1 "kubevirt.io/api/migrations/v1alpha1"
restful "github.com/emicklei/go-restful/v3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
backupv1 "kubevirt.io/api/backup/v1alpha1"
v1 "kubevirt.io/api/core/v1"
exportv1 "kubevirt.io/api/export/v1beta1"
instancetypev1beta1 "kubevirt.io/api/instancetype/v1beta1"
poolv1beta1 "kubevirt.io/api/pool/v1beta1"
snapshotv1 "kubevirt.io/api/snapshot/v1beta1"
mime "kubevirt.io/kubevirt/pkg/rest"
)
const (
obj = " object."
objs = " objects."
watch = "Watch a "
)
func ComposeAPIDefinitions() []*restful.WebService {
var result []*restful.WebService
for _, f := range []func() []*restful.WebService{
kubevirtApiServiceDefinitions,
snapshotApiServiceDefinitions,
exportApiServiceDefinitions,
backupApiServiceDefinitions,
instancetypeApiServiceDefinitions,
migrationPoliciesApiServiceDefinitions,
poolApiServiceDefinitions,
vmCloneDefinitions,
} {
result = append(result, f()...)
}
return result
}
func kubevirtApiServiceDefinitions() []*restful.WebService {
vmiGVR := schema.GroupVersionResource{Group: v1.GroupVersion.Group, Version: v1.GroupVersion.Version, Resource: "virtualmachineinstances"}
vmirsGVR := schema.GroupVersionResource{Group: v1.GroupVersion.Group, Version: v1.GroupVersion.Version, Resource: "virtualmachineinstancereplicasets"}
vmipGVR := schema.GroupVersionResource{Group: v1.GroupVersion.Group, Version: v1.GroupVersion.Version, Resource: "virtualmachineinstancepresets"}
vmGVR := schema.GroupVersionResource{Group: v1.GroupVersion.Group, Version: v1.GroupVersion.Version, Resource: "virtualmachines"}
migrationGVR := schema.GroupVersionResource{Group: v1.GroupVersion.Group, Version: v1.GroupVersion.Version, Resource: "virtualmachineinstancemigrations"}
kubeVirtGVR := schema.GroupVersionResource{Group: v1.GroupVersion.Group, Version: v1.GroupVersion.Version, Resource: "kubevirt"}
ws, err := groupVersionProxyBase(v1.GroupVersion)
if err != nil {
panic(err)
}
ws, err = genericNamespacedResourceProxy(ws, kubeVirtGVR, &v1.KubeVirt{}, v1.KubeVirtGroupVersionKind.Kind, &v1.KubeVirtList{})
if err != nil {
panic(err)
}
ws, err = genericNamespacedResourceProxy(ws, vmiGVR, &v1.VirtualMachineInstance{}, v1.VirtualMachineInstanceGroupVersionKind.Kind, &v1.VirtualMachineInstanceList{})
if err != nil {
panic(err)
}
ws, err = genericNamespacedResourceProxy(ws, vmirsGVR, &v1.VirtualMachineInstanceReplicaSet{}, v1.VirtualMachineInstanceReplicaSetGroupVersionKind.Kind, &v1.VirtualMachineInstanceReplicaSetList{})
if err != nil {
panic(err)
}
ws, err = genericNamespacedResourceProxy(ws, vmipGVR, &v1.VirtualMachineInstancePreset{}, v1.VirtualMachineInstancePresetGroupVersionKind.Kind, &v1.VirtualMachineInstancePresetList{})
if err != nil {
panic(err)
}
ws, err = genericNamespacedResourceProxy(ws, vmGVR, &v1.VirtualMachine{}, v1.VirtualMachineGroupVersionKind.Kind, &v1.VirtualMachineList{})
if err != nil {
panic(err)
}
ws, err = genericNamespacedResourceProxy(ws, migrationGVR, &v1.VirtualMachineInstanceMigration{}, v1.VirtualMachineInstanceMigrationGroupVersionKind.Kind, &v1.VirtualMachineInstanceMigrationList{})
if err != nil {
panic(err)
}
ws2, err := resourceProxyAutodiscovery(vmiGVR)
if err != nil {
panic(err)
}
return []*restful.WebService{ws, ws2}
}
func snapshotApiServiceDefinitions() []*restful.WebService {
vmsGVR := snapshotv1.SchemeGroupVersion.WithResource("virtualmachinesnapshots")
vmscGVR := snapshotv1.SchemeGroupVersion.WithResource("virtualmachinesnapshotcontents")
vmrGVR := snapshotv1.SchemeGroupVersion.WithResource("virtualmachinerestores")
ws, err := groupVersionProxyBase(schema.GroupVersion{Group: snapshotv1.SchemeGroupVersion.Group, Version: snapshotv1.SchemeGroupVersion.Version})
if err != nil {
panic(err)
}
ws, err = genericNamespacedResourceProxy(ws, vmsGVR, &snapshotv1.VirtualMachineSnapshot{}, "VirtualMachineSnapshot", &snapshotv1.VirtualMachineSnapshotList{})
if err != nil {
panic(err)
}
ws, err = genericNamespacedResourceProxy(ws, vmscGVR, &snapshotv1.VirtualMachineSnapshotContent{}, "VirtualMachineSnapshotContent", &snapshotv1.VirtualMachineSnapshotContentList{})
if err != nil {
panic(err)
}
ws, err = genericNamespacedResourceProxy(ws, vmrGVR, &snapshotv1.VirtualMachineRestore{}, "VirtualMachineRestore", &snapshotv1.VirtualMachineRestoreList{})
if err != nil {
panic(err)
}
ws2, err := resourceProxyAutodiscovery(vmsGVR)
if err != nil {
panic(err)
}
return []*restful.WebService{ws, ws2}
}
func exportApiServiceDefinitions() []*restful.WebService {
exportsGVR := exportv1.SchemeGroupVersion.WithResource("virtualmachineexports")
ws, err := groupVersionProxyBase(schema.GroupVersion{Group: exportv1.SchemeGroupVersion.Group, Version: exportv1.SchemeGroupVersion.Version})
if err != nil {
panic(err)
}
ws, err = genericNamespacedResourceProxy(ws, exportsGVR, &exportv1.VirtualMachineExport{}, "VirtualMachineExport", &exportv1.VirtualMachineExportList{})
if err != nil {
panic(err)
}
ws2, err := resourceProxyAutodiscovery(exportsGVR)
if err != nil {
panic(err)
}
return []*restful.WebService{ws, ws2}
}
func migrationPoliciesApiServiceDefinitions() []*restful.WebService {
mpGVR := migrationsv1.SchemeGroupVersion.WithResource(migrations.ResourceMigrationPolicies)
ws, err := groupVersionProxyBase(schema.GroupVersion{Group: migrationsv1.SchemeGroupVersion.Group, Version: migrationsv1.SchemeGroupVersion.Version})
if err != nil {
panic(err)
}
ws, err = genericClusterResourceProxy(ws, mpGVR, &migrationsv1.MigrationPolicy{}, migrationsv1.MigrationPolicyKind.Kind, &migrationsv1.MigrationPolicyList{})
if err != nil {
panic(err)
}
ws2, err := resourceProxyAutodiscovery(mpGVR)
if err != nil {
panic(err)
}
return []*restful.WebService{ws, ws2}
}
func instancetypeApiServiceDefinitions() []*restful.WebService {
instancetypeGVR := instancetypev1beta1.SchemeGroupVersion.WithResource(instancetype.PluralResourceName)
clusterInstancetypeGVR := instancetypev1beta1.SchemeGroupVersion.WithResource(instancetype.ClusterPluralResourceName)
preferenceGVR := instancetypev1beta1.SchemeGroupVersion.WithResource(instancetype.PluralPreferenceResourceName)
clusterPreferenceGVR := instancetypev1beta1.SchemeGroupVersion.WithResource(instancetype.ClusterPluralPreferenceResourceName)
ws, err := groupVersionProxyBase(instancetypev1beta1.SchemeGroupVersion)
if err != nil {
panic(err)
}
ws, err = genericNamespacedResourceProxy(ws, instancetypeGVR, &instancetypev1beta1.VirtualMachineInstancetype{}, "VirtualMachineInstancetype", &instancetypev1beta1.VirtualMachineInstancetypeList{})
if err != nil {
panic(err)
}
ws, err = genericClusterResourceProxy(ws, clusterInstancetypeGVR, &instancetypev1beta1.VirtualMachineClusterInstancetype{}, "VirtualMachineClusterInstancetype", &instancetypev1beta1.VirtualMachineClusterInstancetypeList{})
if err != nil {
panic(err)
}
ws, err = genericNamespacedResourceProxy(ws, preferenceGVR, &instancetypev1beta1.VirtualMachinePreference{}, "VirtualMachinePreference", &instancetypev1beta1.VirtualMachinePreferenceList{})
if err != nil {
panic(err)
}
ws, err = genericClusterResourceProxy(ws, clusterPreferenceGVR, &instancetypev1beta1.VirtualMachineClusterPreference{}, "VirtualMachineClusterPreference", &instancetypev1beta1.VirtualMachineClusterPreferenceList{})
if err != nil {
panic(err)
}
ws2, err := resourceProxyAutodiscovery(instancetypeGVR)
if err != nil {
panic(err)
}
return []*restful.WebService{ws, ws2}
}
func poolApiServiceDefinitions() []*restful.WebService {
poolGVR := poolv1beta1.SchemeGroupVersion.WithResource("virtualmachinepools")
ws, err := groupVersionProxyBase(poolv1beta1.SchemeGroupVersion)
if err != nil {
panic(err)
}
ws, err = genericNamespacedResourceProxy(ws, poolGVR, &poolv1beta1.VirtualMachinePool{}, "VirtualMachinePool", &poolv1beta1.VirtualMachinePoolList{})
if err != nil {
panic(err)
}
ws2, err := resourceProxyAutodiscovery(poolGVR)
if err != nil {
panic(err)
}
return []*restful.WebService{ws, ws2}
}
func vmCloneDefinitions() []*restful.WebService {
mpGVR := clone.SchemeGroupVersion.WithResource(clonebase.ResourceVMClonePlural)
ws, err := groupVersionProxyBase(schema.GroupVersion{Group: clone.SchemeGroupVersion.Group, Version: clone.SchemeGroupVersion.Version})
if err != nil {
panic(err)
}
ws, err = genericClusterResourceProxy(ws, mpGVR, &clone.VirtualMachineClone{}, clone.VirtualMachineCloneKind.Kind, &clone.VirtualMachineCloneList{})
if err != nil {
panic(err)
}
ws2, err := resourceProxyAutodiscovery(mpGVR)
if err != nil {
panic(err)
}
return []*restful.WebService{ws, ws2}
}
func backupApiServiceDefinitions() []*restful.WebService {
backupsGVR := backupv1.SchemeGroupVersion.WithResource("virtualmachinebackups")
ws, err := groupVersionProxyBase(backupv1.SchemeGroupVersion)
if err != nil {
panic(err)
}
ws, err = genericNamespacedResourceProxy(ws, backupsGVR, &backupv1.VirtualMachineBackup{}, "VirtualMachineBackup", &backupv1.VirtualMachineBackupList{})
if err != nil {
panic(err)
}
ws2, err := resourceProxyAutodiscovery(backupsGVR)
if err != nil {
panic(err)
}
return []*restful.WebService{ws, ws2}
}
func groupVersionProxyBase(gv schema.GroupVersion) (*restful.WebService, error) {
ws := new(restful.WebService)
ws.Doc("The KubeVirt API, for virtual machine management.")
ws.Path(GroupVersionBasePath(gv))
ws.Route(
ws.GET("/").Produces(mime.MIME_JSON).Writes(metav1.APIResourceList{}).
To(noop).
Operation(fmt.Sprintf("getAPIResources-%s-%s", gv.Group, gv.Version)).
Doc("Get KubeVirt API Resources").
Returns(http.StatusOK, "OK", metav1.APIResourceList{}).
Returns(http.StatusNotFound, "Not Found", ""),
)
return ws, nil
}
func genericNamespacedResourceProxy(ws *restful.WebService, gvr schema.GroupVersionResource, objPointer runtime.Object, objKind string, objListPointer runtime.Object) (*restful.WebService, error) {
objExample := reflect.ValueOf(objPointer).Elem().Interface()
listExample := reflect.ValueOf(objListPointer).Elem().Interface()
ws.Route(addNamespaceParam(ws,
createOperation(ws, NamespacedResourceBasePath(gvr), objExample).
Operation("createNamespaced"+objKind).
Doc("Create a "+objKind+obj),
))
ws.Route(addNamespaceParam(ws,
replaceOperation(ws, NamespacedResourcePath(gvr), objExample).
Operation("replaceNamespaced"+objKind).
Doc("Update a "+objKind+obj),
))
ws.Route(addNamespaceParam(ws,
deleteOperation(ws, NamespacedResourcePath(gvr)).
Operation("deleteNamespaced"+objKind).
Doc("Delete a "+objKind+obj),
))
ws.Route(addNamespaceParam(ws,
readOperation(ws, NamespacedResourcePath(gvr), objExample).
Operation("readNamespaced"+objKind).
Doc("Get a "+objKind+obj),
))
ws.Route(
listOperation(ws, gvr.Resource, listExample).
Operation("list" + objKind + "ForAllNamespaces").
Doc("Get a list of all " + objKind + objs),
)
ws.Route(addNamespaceParam(ws,
patchOperation(ws, NamespacedResourcePath(gvr), objExample).
Operation("patchNamespaced"+objKind).
Doc("Patch a "+objKind+obj),
))
// TODO: implement watch. For now this route exists only to provide swagger documentation.
ws.Route(
watchOperation(ws, "/watch/"+gvr.Resource).
Operation("watch" + objKind + "ListForAllNamespaces").
Doc(watch + objKind + "List object."),
)
// TODO: implement watch. For now this route exists only to provide swagger documentation.
ws.Route(addNamespaceParam(ws,
watchOperation(ws, "/watch"+NamespacedResourceBasePath(gvr)).
Operation("watchNamespaced"+objKind).
Doc(watch+objKind+obj),
))
ws.Route(addNamespaceParam(ws,
listOperation(ws, NamespacedResourceBasePath(gvr), listExample).
Operation("listNamespaced"+objKind).
Doc("Get a list of "+objKind+objs),
))
ws.Route(
deleteCollectionOperation(ws, NamespacedResourceBasePath(gvr)).
Operation("deleteCollectionNamespaced" + objKind).
Doc("Delete a collection of " + objKind + objs),
)
return ws, nil
}
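// genericClusterResourceProxy mirrors genericNamespacedResourceProxy for
// cluster-scoped resources, registering the same operations without the
// namespace path parameter.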
func genericClusterResourceProxy(ws *restful.WebService, gvr schema.GroupVersionResource, objPointer runtime.Object, objKind string, objListPointer runtime.Object) (*restful.WebService, error) {
objExample := reflect.ValueOf(objPointer).Elem().Interface()
listExample := reflect.ValueOf(objListPointer).Elem().Interface()
ws.Route(
createOperation(ws, ClusterResourceBasePath(gvr), objExample).
Operation("create" + objKind).
Doc("Create a " + objKind + obj),
)
ws.Route(
replaceOperation(ws, ClusterResourcePath(gvr), objExample).
Operation("replace" + objKind).
Doc("Update a " + objKind + obj),
)
ws.Route(
deleteOperation(ws, ClusterResourcePath(gvr)).
Operation("delete" + objKind).
Doc("Delete a " + objKind + obj),
)
ws.Route(
readOperation(ws, ClusterResourcePath(gvr), objExample).
Operation("read" + objKind).
Doc("Get a " + objKind + obj),
)
ws.Route(
listOperation(ws, gvr.Resource, listExample).
Operation("list" + objKind).
Doc("Get a list of " + objKind + objs),
)
ws.Route(
patchOperation(ws, ClusterResourcePath(gvr), objExample).
Operation("patch" + objKind).
Doc("Patch a " + objKind + obj),
)
// TODO: implement watch. For now this route exists only to provide swagger documentation.
ws.Route(
watchOperation(ws, "/watch/"+gvr.Resource).
Operation("watch" + objKind + "ListForAllNamespaces").
Doc(watch + objKind + "List object."),
)
ws.Route(
deleteCollectionOperation(ws, ClusterResourceBasePath(gvr)).
Operation("deleteCollection" + objKind).
Doc("Delete a collection of " + objKind + objs),
)
return ws, nil
}
func resourceProxyAutodiscovery(gvr schema.GroupVersionResource) (*restful.WebService, error) {
ws := new(restful.WebService)
ws.Path(GroupBasePath(gvr.GroupVersion()))
ws.Route(ws.GET("/").
Produces(mime.MIME_JSON).Writes(metav1.APIGroup{}).
To(noop).
Doc("Get a KubeVirt API group").
Operation("getAPIGroup-"+gvr.Group).
Returns(http.StatusOK, "OK", metav1.APIGroup{}).
Returns(http.StatusNotFound, "Not Found", ""))
return ws, nil
}
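// The builders below assemble the individual route stubs. Each attaches the
// accepted and produced MIME types, example request/response objects and
// status codes, and routes to the noop handler, since these services exist
// for documentation and discovery rather than for serving requests.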
func createOperation(ws *restful.WebService, subPath string, objExample interface{}) *restful.RouteBuilder {
return ws.POST(subPath).
Produces(mime.MIME_JSON, mime.MIME_YAML).
Consumes(mime.MIME_JSON, mime.MIME_YAML).
To(noop).Reads(objExample).Writes(objExample).
Returns(http.StatusOK, "OK", objExample).
Returns(http.StatusCreated, "Created", objExample).
Returns(http.StatusAccepted, "Accepted", objExample).
Returns(http.StatusUnauthorized, "Unauthorized", "")
}
func replaceOperation(ws *restful.WebService, subPath string, objExample interface{}) *restful.RouteBuilder {
return addPutParams(ws,
ws.PUT(subPath).
Produces(mime.MIME_JSON, mime.MIME_YAML).
Consumes(mime.MIME_JSON, mime.MIME_YAML).
To(noop).Reads(objExample).Writes(objExample).
Returns(http.StatusOK, "OK", objExample).
Returns(http.StatusCreated, "Create", objExample).
Returns(http.StatusUnauthorized, "Unauthorized", ""),
)
}
func patchOperation(ws *restful.WebService, subPath string, objExample interface{}) *restful.RouteBuilder {
return addPatchParams(ws,
ws.PATCH(subPath).
Consumes(mime.MIME_JSON_PATCH, mime.MIME_MERGE_PATCH).
Produces(mime.MIME_JSON).
To(noop).
Writes(objExample).Reads(metav1.Patch{}).
Returns(http.StatusOK, "OK", objExample).
Returns(http.StatusUnauthorized, "Unauthorized", ""),
)
}
func deleteOperation(ws *restful.WebService, subPath string) *restful.RouteBuilder {
return addDeleteParams(ws,
ws.DELETE(subPath).
Produces(mime.MIME_JSON, mime.MIME_YAML).
Consumes(mime.MIME_JSON, mime.MIME_YAML).
To(noop).
Reads(metav1.DeleteOptions{}).Writes(metav1.Status{}).
Returns(http.StatusOK, "OK", metav1.Status{}).
Returns(http.StatusUnauthorized, "Unauthorized", ""),
)
}
func deleteCollectionOperation(ws *restful.WebService, subPath string) *restful.RouteBuilder {
return addDeleteListParams(ws,
ws.DELETE(subPath).
Produces(mime.MIME_JSON, mime.MIME_YAML).
To(noop).Writes(metav1.Status{}).
Returns(http.StatusOK, "OK", metav1.Status{}).
Returns(http.StatusUnauthorized, "Unauthorized", ""),
)
}
func readOperation(ws *restful.WebService, subPath string, objExample interface{}) *restful.RouteBuilder {
return addGetParams(ws,
ws.GET(subPath).
Produces(mime.MIME_JSON, mime.MIME_YAML, mime.MIME_JSON_STREAM).
To(noop).Writes(objExample).
Returns(http.StatusOK, "OK", objExample).
Returns(http.StatusUnauthorized, "Unauthorized", ""),
)
}
func listOperation(ws *restful.WebService, subPath string, listExample interface{}) *restful.RouteBuilder {
return addGetListParams(ws,
ws.GET(subPath).
Produces(mime.MIME_JSON, mime.MIME_YAML, mime.MIME_JSON_STREAM).
To(noop).Writes(listExample).
Returns(http.StatusOK, "OK", listExample).
Returns(http.StatusUnauthorized, "Unauthorized", ""),
)
}
func watchOperation(ws *restful.WebService, subPath string) *restful.RouteBuilder {
return addWatchGetListParams(ws,
ws.GET(subPath).
Produces(mime.MIME_JSON).
To(noop).Writes(metav1.WatchEvent{}).
Returns(http.StatusOK, "OK", metav1.WatchEvent{}).
Returns(http.StatusUnauthorized, "Unauthorized", ""),
)
}
func addCollectionParams(ws *restful.WebService, builder *restful.RouteBuilder) *restful.RouteBuilder {
return builder.Param(continueParam(ws)).
Param(fieldSelectorParam(ws)).
Param(includeUninitializedParam(ws)).
Param(labelSelectorParam(ws)).
Param(limitParam(ws)).
Param(resourceVersionParam(ws)).
Param(timeoutSecondsParam(ws)).
Param(watchParam(ws))
}
func addNamespaceParam(ws *restful.WebService, builder *restful.RouteBuilder) *restful.RouteBuilder {
return builder.Param(NamespaceParam(ws))
}
func addWatchGetListParams(ws *restful.WebService, builder *restful.RouteBuilder) *restful.RouteBuilder {
return addCollectionParams(ws, builder)
}
func addGetListParams(ws *restful.WebService, builder *restful.RouteBuilder) *restful.RouteBuilder {
return addCollectionParams(ws, builder)
}
func addDeleteListParams(ws *restful.WebService, builder *restful.RouteBuilder) *restful.RouteBuilder {
return addCollectionParams(ws, builder)
}
func addGetParams(ws *restful.WebService, builder *restful.RouteBuilder) *restful.RouteBuilder {
return builder.Param(NameParam(ws)).
Param(exactParam(ws)).
Param(exportParam(ws))
}
func addPutParams(ws *restful.WebService, builder *restful.RouteBuilder) *restful.RouteBuilder {
return builder.Param(NameParam(ws))
}
func addDeleteParams(ws *restful.WebService, builder *restful.RouteBuilder) *restful.RouteBuilder {
return builder.Param(NameParam(ws)).
Param(gracePeriodSecondsParam(ws)).
Param(orphanDependentsParam(ws)).
Param(propagationPolicyParam(ws))
}
func addPatchParams(ws *restful.WebService, builder *restful.RouteBuilder) *restful.RouteBuilder {
return builder.Param(NameParam(ws))
}
const (
NamespaceParamName = "namespace"
NameParamName = "name"
MoveCursorParamName = "moveCursor"
PreserveSessionParamName = "preserveSession"
)
func NameParam(ws *restful.WebService) *restful.Parameter {
return ws.PathParameter(NameParamName, "Name of the resource").Required(true)
}
func NamespaceParam(ws *restful.WebService) *restful.Parameter {
return ws.PathParameter(NamespaceParamName, "Object name and auth scope, such as for teams and projects").Required(true)
}
func MoveCursorParam(ws *restful.WebService) *restful.Parameter {
return ws.QueryParameter(MoveCursorParamName, "Move the cursor on the VNC display to wake up the screen").DataType("boolean").DefaultValue("false")
}
func PreserveSessionParam(ws *restful.WebService) *restful.Parameter {
return ws.
QueryParameter(PreserveSessionParamName, "Connect only if ongoing session is not disturbed.").
DataType("boolean").
DefaultValue("false")
}
func labelSelectorParam(ws *restful.WebService) *restful.Parameter {
return ws.QueryParameter("labelSelector", "A selector to restrict the list of returned objects by their labels. Defaults to everything")
}
func fieldSelectorParam(ws *restful.WebService) *restful.Parameter {
return ws.QueryParameter("fieldSelector", "A selector to restrict the list of returned objects by their fields. Defaults to everything.")
}
func resourceVersionParam(ws *restful.WebService) *restful.Parameter {
return ws.QueryParameter("resourceVersion", "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.")
}
func timeoutSecondsParam(ws *restful.WebService) *restful.Parameter {
return ws.QueryParameter("timeoutSeconds", "TimeoutSeconds for the list/watch call.").DataType("integer")
}
func includeUninitializedParam(ws *restful.WebService) *restful.Parameter {
return ws.QueryParameter("includeUninitialized", "If true, partially initialized resources are included in the response.").DataType("boolean")
}
func watchParam(ws *restful.WebService) *restful.Parameter {
return ws.QueryParameter("watch", "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.").DataType("boolean")
}
func limitParam(ws *restful.WebService) *restful.Parameter {
return ws.QueryParameter("limit", "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.").DataType("integer")
}
func continueParam(ws *restful.WebService) *restful.Parameter {
return ws.QueryParameter("continue", "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.")
}
func exactParam(ws *restful.WebService) *restful.Parameter {
return ws.QueryParameter("exact", "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.").DataType("boolean")
}
func exportParam(ws *restful.WebService) *restful.Parameter {
return ws.QueryParameter("export", "Should this value be exported. Export strips fields that a user can not specify.").DataType("boolean")
}
func gracePeriodSecondsParam(ws *restful.WebService) *restful.Parameter {
return ws.QueryParameter("gracePeriodSeconds", "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.").DataType("integer")
}
func orphanDependentsParam(ws *restful.WebService) *restful.Parameter {
return ws.QueryParameter("orphanDependents", "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.").DataType("boolean")
}
func propagationPolicyParam(ws *restful.WebService) *restful.Parameter {
return ws.QueryParameter("propagationPolicy", "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.")
}
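// The helpers below render the URL fragments used when registering routes.
// As an illustration, for a hypothetical GroupVersionResource
// {Group: "kubevirt.io", Version: "v1", Resource: "virtualmachineinstances"}:
// GroupBasePath yields "/apis/kubevirt.io",
// GroupVersionBasePath yields "/apis/kubevirt.io/v1",
// NamespacedResourceBasePath yields "/namespaces/{namespace}/virtualmachineinstances",
// and NamespacedResourcePath yields "/namespaces/{namespace}/virtualmachineinstances/{name}".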
func GroupBasePath(gvr schema.GroupVersion) string {
return fmt.Sprintf("/apis/%s", gvr.Group)
}
func GroupVersionBasePath(gvr schema.GroupVersion) string {
return fmt.Sprintf("/apis/%s/%s", gvr.Group, gvr.Version)
}
func NamespacedResourceBasePath(gvr schema.GroupVersionResource) string {
return fmt.Sprintf("/namespaces/{namespace}/%s", gvr.Resource)
}
func NamespacedResourcePath(gvr schema.GroupVersionResource) string {
return fmt.Sprintf("/namespaces/{namespace}/%s/{name}", gvr.Resource)
}
func ClusterResourceBasePath(gvr schema.GroupVersionResource) string {
return gvr.Resource
}
func ClusterResourcePath(gvr schema.GroupVersionResource) string {
return fmt.Sprintf("%s/{name}", gvr.Resource)
}
func SubResourcePath(subResource string) string {
return path.Join("/", subResource)
}
const (
PortParamName = "port"
TLSParamName = "tls"
PortPath = "/{port}"
ProtocolParamName = "protocol"
ProtocolPath = "/{protocol}"
)
func PortForwardPortParameter(ws *restful.WebService) *restful.Parameter {
return ws.PathParameter(PortParamName, "The target port for portforward on the VirtualMachineInstance.")
}
func PortForwardProtocolParameter(ws *restful.WebService) *restful.Parameter {
return ws.PathParameter(ProtocolParamName, "The protocol for portforward on the VirtualMachineInstance.")
}
func noop(_ *restful.Request, _ *restful.Response) {}
func VSOCKPortParameter(ws *restful.WebService) *restful.Parameter {
return ws.QueryParameter(PortParamName, "The port which the VSOCK application listens to.").DataType("integer").Required(true)
}
func VSOCKTLSParameter(ws *restful.WebService) *restful.Parameter {
return ws.QueryParameter(TLSParamName, "Whether to request a TLS encrypted session from the VSOCK application.").DataType("boolean").Required(false)
}
// Copyright 2025 the cncf-fuzzing authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package definitions
import "k8s.io/apimachinery/pkg/runtime/schema"
// noopValidator is a stub for fuzzing that skips OpenAPI schema validation.
// This avoids the 111-second initialization of ComposeAPIDefinitions().
type noopValidator struct{}
func (n *noopValidator) Validate(gvk schema.GroupVersionKind, obj map[string]interface{}) []error {
return nil
}
func (n *noopValidator) ValidateSpec(gvk schema.GroupVersionKind, obj map[string]interface{}) []error {
return nil
}
func (n *noopValidator) ValidateStatus(gvk schema.GroupVersionKind, obj map[string]interface{}) []error {
return nil
}
var Validator = &noopValidator{}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package webhooks
import (
"fmt"
"slices"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/pkg/virt-config/featuregate"
)
// ValidateVirtualMachineInstanceAmd64Setting is a validation function used by the validating webhook to filter unsupported settings on amd64
func ValidateVirtualMachineInstanceAmd64Setting(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var statusCauses []metav1.StatusCause
validateWatchdogAmd64(field, spec, &statusCauses)
validateVideoTypeAmd64(field, spec, &statusCauses)
return statusCauses
}
func validateVideoTypeAmd64(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, statusCauses *[]metav1.StatusCause) {
if spec.Domain.Devices.Video == nil {
return
}
videoType := spec.Domain.Devices.Video.Type
validTypes := []string{"vga", "cirrus", "virtio", "ramfb", "bochs"}
if !slices.Contains(validTypes, videoType) {
*statusCauses = append(*statusCauses, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: fmt.Sprintf("video model '%s' is not supported on amd64 architecture", videoType),
Field: field.Child("domain", "devices", "video").Child("type").String(),
})
}
}
func validateWatchdogAmd64(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, statusCauses *[]metav1.StatusCause) {
watchdog := spec.Domain.Devices.Watchdog
if watchdog == nil {
return
}
if !isOnlyI6300ESBWatchdog(watchdog) {
*statusCauses = append(*statusCauses, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: "amd64 only supports I6300ESB watchdog device",
Field: field.Child("domain", "devices", "watchdog").String(),
})
}
}
func isOnlyI6300ESBWatchdog(watchdog *v1.Watchdog) bool {
return watchdog.WatchdogDevice.I6300ESB != nil && watchdog.WatchdogDevice.Diag288 == nil
}
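// ValidateLaunchSecurityAmd64 validates the launchSecurity stanza on amd64;
// callers are expected to invoke it only when spec.Domain.LaunchSecurity is
// non-nil. It enforces that exactly one of SEV, SNP or TDX is selected and
// that the matching feature gate is enabled, then checks compatibility with
// SMM, EFI/SecureBoot, persistent EFI, SEV attestation's start strategy
// requirement, and bootable NICs.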
func ValidateLaunchSecurityAmd64(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) []metav1.StatusCause {
var causes []metav1.StatusCause
launchSecurity := spec.Domain.LaunchSecurity
fg := ""
var selectedTypes []string
if launchSecurity.SEV != nil {
selectedTypes = append(selectedTypes, "SEV")
fg = featuregate.WorkloadEncryptionSEV
}
if launchSecurity.SNP != nil {
selectedTypes = append(selectedTypes, "SNP")
fg = featuregate.WorkloadEncryptionSEV
}
if launchSecurity.TDX != nil {
selectedTypes = append(selectedTypes, "TDX")
fg = featuregate.WorkloadEncryptionTDX
}
// Exactly one launchSecurity type may be set; after this check we are guaranteed a single valid type
if len(selectedTypes) != 1 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeForbidden,
Message: "One and only one launchSecurity type can be set",
Field: field.Child("launchSecurity").String(),
})
} else if ((launchSecurity.SEV != nil || launchSecurity.SNP != nil) && !config.WorkloadEncryptionSEVEnabled()) ||
(launchSecurity.TDX != nil && !config.WorkloadEncryptionTDXEnabled()) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s feature gate is not enabled in kubevirt-config", fg),
Field: field.Child("launchSecurity").String(),
})
} else {
features := spec.Domain.Features
if launchSecurity.TDX != nil &&
(features != nil && features.SMM != nil && (features.SMM.Enabled == nil || *features.SMM.Enabled)) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "TDX does not work along with SMM",
Field: field.Child("launchSecurity").String(),
})
}
firmware := spec.Domain.Firmware
if firmware == nil || firmware.Bootloader == nil || firmware.Bootloader.EFI == nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s requires OVMF (UEFI)", selectedTypes[0]),
Field: field.Child("launchSecurity").String(),
})
} else {
efi := firmware.Bootloader.EFI
if (launchSecurity.SEV != nil || launchSecurity.SNP != nil) &&
(efi.SecureBoot == nil || *efi.SecureBoot) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s does not work along with SecureBoot", selectedTypes[0]),
Field: field.Child("launchSecurity").String(),
})
}
if (launchSecurity.SNP != nil || launchSecurity.TDX != nil) &&
(efi.Persistent != nil && *efi.Persistent) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s does not work along with Persistent EFI variables", selectedTypes[0]),
Field: field.Child("launchSecurity").String(),
})
}
}
startStrategy := spec.StartStrategy
if launchSecurity.SEV != nil &&
(startStrategy == nil || *startStrategy != v1.StartStrategyPaused) {
if launchSecurity.SEV.Attestation != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("SEV attestation requires VMI StartStrategy '%s'", v1.StartStrategyPaused),
Field: field.Child("launchSecurity").String(),
})
}
}
for _, iface := range spec.Domain.Devices.Interfaces {
if iface.BootOrder != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s does not work with bootable NICs: %s", selectedTypes[0], iface.Name),
Field: field.Child("launchSecurity").String(),
})
}
}
}
return causes
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
/*
* arm64 utilities are in the webhooks package because they are used both
* by validation and mutation webhooks.
*/
package webhooks
import (
"fmt"
"slices"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
)
// ValidateVirtualMachineInstanceArm64Setting is a validation function used by the validating webhook to filter unsupported settings on arm64
func ValidateVirtualMachineInstanceArm64Setting(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var statusCauses []metav1.StatusCause
validateBootOptions(field, spec, &statusCauses)
validateCPUModel(field, spec, &statusCauses)
validateDiskBus(field, spec, &statusCauses)
validateWatchdog(field, spec, &statusCauses)
validateSoundDevice(field, spec, &statusCauses)
validateVideoTypeArm64(field, spec, &statusCauses)
return statusCauses
}
func validateVideoTypeArm64(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, statusCauses *[]metav1.StatusCause) {
if spec.Domain.Devices.Video == nil {
return
}
videoType := spec.Domain.Devices.Video.Type
validTypes := []string{"virtio", "ramfb"}
if !slices.Contains(validTypes, videoType) {
*statusCauses = append(*statusCauses, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: fmt.Sprintf("video model '%s' is not supported on arm64 architecture", videoType),
Field: field.Child("domain", "devices", "video").Child("type").String(),
})
}
}
func validateBootOptions(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, statusCauses *[]metav1.StatusCause) {
if spec.Domain.Firmware != nil && spec.Domain.Firmware.Bootloader != nil {
if spec.Domain.Firmware.Bootloader.BIOS != nil {
*statusCauses = append(*statusCauses, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: "Arm64 does not support bios boot, please change to uefi boot",
Field: field.Child("domain", "firmware", "bootloader", "bios").String(),
})
}
if spec.Domain.Firmware.Bootloader.EFI != nil {
// When EFI is enabled, SecureBoot is enabled by default, so two conditions
// are rejected here:
// 1. EFI is enabled without an explicit SecureBoot setting
// 2. both EFI and SecureBoot are enabled
if spec.Domain.Firmware.Bootloader.EFI.SecureBoot == nil || *spec.Domain.Firmware.Bootloader.EFI.SecureBoot {
*statusCauses = append(*statusCauses, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: "UEFI secure boot is currently not supported on aarch64 Arch",
Field: field.Child("domain", "firmware", "bootloader", "efi", "secureboot").String(),
})
}
}
}
}
func validateCPUModel(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, statusCauses *[]metav1.StatusCause) {
if spec.Domain.CPU != nil && spec.Domain.CPU.Model != "" && spec.Domain.CPU.Model != v1.CPUModeHostPassthrough {
*statusCauses = append(*statusCauses, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: fmt.Sprintf("currently, %v is the only model supported on Arm64", v1.CPUModeHostPassthrough),
Field: field.Child("domain", "cpu", "model").String(),
})
}
}
func validateDiskBus(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, statusCauses *[]metav1.StatusCause) {
if spec.Domain.Devices.Disks != nil {
// checkIfBusAvailable reports whether the bus type is supported on Arm64:
// empty (defaulted), virtio, or scsi.
checkIfBusAvailable := func(bus v1.DiskBus) bool {
return bus == "" || bus == v1.DiskBusVirtio || bus == v1.DiskBusSCSI
}
for i, disk := range spec.Domain.Devices.Disks {
if disk.Disk != nil && !checkIfBusAvailable(disk.Disk.Bus) {
*statusCauses = append(*statusCauses, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: "Arm64 not support this disk bus type, please use virtio or scsi",
Field: field.Child("domain", "devices", "disks").Index(i).Child("disk", "bus").String(),
})
}
if disk.CDRom != nil && !checkIfBusAvailable(disk.CDRom.Bus) {
*statusCauses = append(*statusCauses, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: "Arm64 not support this disk bus type, please use virtio or scsi",
Field: field.Child("domain", "devices", "disks").Index(i).Child("cdrom", "bus").String(),
})
}
if disk.LUN != nil && !checkIfBusAvailable(disk.LUN.Bus) {
*statusCauses = append(*statusCauses, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: "Arm64 not support this disk bus type, please use virtio or scsi",
Field: field.Child("domain", "devices", "disks").Index(i).Child("lun", "bus").String(),
})
}
}
}
}
func validateWatchdog(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, statusCauses *[]metav1.StatusCause) {
if spec.Domain.Devices.Watchdog != nil {
*statusCauses = append(*statusCauses, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: "Arm64 not support Watchdog device",
Field: field.Child("domain", "devices", "watchdog").String(),
})
}
}
func validateSoundDevice(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, statusCauses *[]metav1.StatusCause) {
if spec.Domain.Devices.Sound != nil {
*statusCauses = append(*statusCauses, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: "Arm64 not support sound device",
Field: field.Child("domain", "devices", "sound").String(),
})
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
/*
* hyperv utilities are in the webhooks package because they are used both
* by validation and mutation webhooks.
*/
package webhooks
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
nodelabellerutil "kubevirt.io/kubevirt/pkg/virt-handler/node-labeller/util"
)
var _true bool = true
func enableFeatureState(fs **v1.FeatureState) {
var val *v1.FeatureState
if *fs != nil {
val = *fs
} else {
val = &v1.FeatureState{}
}
val.Enabled = &_true
*fs = val
}
func isFeatureStateMissing(fs **v1.FeatureState) bool {
return *fs == nil || (*fs).Enabled == nil
}
// TODO: this dupes code in pkg/virt-controller/services/template.go
func isFeatureStateEnabled(fs **v1.FeatureState) bool {
return !isFeatureStateMissing(fs) && *((*fs).Enabled)
}
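// HypervFeature ties a Hyper-V enlightenment to the feature it depends on,
// if any. State points into the VMI spec so that requirements can be enabled
// in place, Field records the spec path for error reporting, and Requires
// chains features to their prerequisites (for example, synic requires
// vpindex).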
type HypervFeature struct {
State **v1.FeatureState
Field *k8sfield.Path
Requires *HypervFeature
}
func (hf HypervFeature) isRequirementOK() bool {
if !isFeatureStateEnabled(hf.State) {
return true
}
if hf.Requires == nil {
return true
}
return isFeatureStateEnabled(hf.Requires.State)
}
// A requirement is compatible if
// 1. it is already enabled (either by the user or by us previously), or
// 2. the user has not set it, so we can enable it on their behalf.
func (hf HypervFeature) TryToSetRequirement() error {
if !isFeatureStateEnabled(hf.State) || hf.Requires == nil {
// not enabled or no requirements: nothing to do
return nil
}
if isFeatureStateMissing(hf.Requires.State) {
enableFeatureState(hf.Requires.State)
return nil
}
if isFeatureStateEnabled(hf.Requires.State) {
return nil
}
return fmt.Errorf("%s", hf.String())
}
func (hf HypervFeature) IsRequirementFulfilled() (metav1.StatusCause, bool) {
if !hf.isRequirementOK() {
return metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: hf.String(),
Field: hf.Field.String(),
}, false
}
return metav1.StatusCause{}, true
}
func (hf HypervFeature) String() string {
if hf.Requires == nil {
return fmt.Sprintf("'%s' is missing", hf.Field.String())
}
return fmt.Sprintf("'%s' requires '%s', which was disabled.", hf.Field.String(), hf.Requires.Field.String())
}
func getHypervFeatureDependencies(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []HypervFeature {
if spec.Domain.Features == nil || spec.Domain.Features.Hyperv == nil {
return []HypervFeature{}
}
hyperv := spec.Domain.Features.Hyperv // shortcut
hypervField := field.Child("domain", "features", "hyperv") // shortcut
vpindex := HypervFeature{
State: &hyperv.VPIndex,
Field: hypervField.Child("vpindex"),
}
synic := HypervFeature{
State: &hyperv.SyNIC,
Field: hypervField.Child("synic"),
Requires: &vpindex,
}
vapic := HypervFeature{
State: &hyperv.VAPIC,
Field: hypervField.Child("vapic"),
}
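// SyNICTimer has its own API type, so mirror its Enabled flag into a plain
// FeatureState to let it participate in the generic dependency check below.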
syNICTimer := &v1.FeatureState{}
if hyperv.SyNICTimer != nil {
syNICTimer.Enabled = hyperv.SyNICTimer.Enabled
}
features := []HypervFeature{
// keep in REVERSE order: leaves first.
{
State: &hyperv.EVMCS,
Field: hypervField.Child("evmcs"),
Requires: &vapic,
},
{
State: &hyperv.IPI,
Field: hypervField.Child("ipi"),
Requires: &vpindex,
},
{
State: &hyperv.TLBFlush,
Field: hypervField.Child("tlbflush"),
Requires: &vpindex,
},
{
State: &syNICTimer,
Field: hypervField.Child("synictimer"),
Requires: &synic,
},
synic,
}
return features
}
func ValidateVirtualMachineInstanceHyperv(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
causes := ValidateVirtualMachineInstanceHypervFeatureDependencies(field, spec)
causes = append(causes, ValidateVirtualMachineInstanceHypervMode(field, spec)...)
return causes
}
func ValidateVirtualMachineInstanceHypervMode(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.Features != nil && spec.Domain.Features.Hyperv != nil && spec.Domain.Features.HypervPassthrough != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Cannot explicitly set hyperV features if HypervPassthrough is being used. Please use either HyperV or HypervPassthrough.",
Field: field.String(),
})
}
return causes
}
func ValidateVirtualMachineInstanceHypervFeatureDependencies(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
// In a future, yet undecided, release either libvirt or QEMU will check the hyperv dependencies, and this code can then be removed.
var causes []metav1.StatusCause
if features := getHypervFeatureDependencies(field, spec); features != nil {
for _, feat := range features {
if cause, ok := feat.IsRequirementFulfilled(); !ok {
causes = append(causes, cause)
}
}
}
if spec.Domain.Features == nil || spec.Domain.Features.Hyperv == nil || spec.Domain.Features.Hyperv.EVMCS == nil ||
(spec.Domain.Features.Hyperv.EVMCS.Enabled != nil && !(*spec.Domain.Features.Hyperv.EVMCS.Enabled)) {
return causes
}
evmcsDependency := getEVMCSDependency()
if spec.Domain.CPU == nil || len(spec.Domain.CPU.Features) == 0 {
causes = append(causes, metav1.StatusCause{Type: metav1.CauseTypeFieldValueRequired, Message: fmt.Sprintf("%s cpu feature is required when evmcs is set", evmcsDependency.Name), Field: "spec.domain.cpu.features"})
return causes
}
for i, existingFeature := range spec.Domain.CPU.Features {
if existingFeature.Name == evmcsDependency.Name && existingFeature.Policy != evmcsDependency.Policy {
causes = append(causes, metav1.StatusCause{Type: metav1.CauseTypeFieldValueInvalid, Message: fmt.Sprintf("%s cpu feature has to be set to %s policy", evmcsDependency.Name, evmcsDependency.Policy), Field: fmt.Sprintf("spec.domain.cpu.features[%d].policy", i)})
}
}
return causes
}
func SetHypervFeatureDependencies(spec *v1.VirtualMachineInstanceSpec) error {
path := k8sfield.NewPath("spec")
if features := getHypervFeatureDependencies(path, spec); features != nil {
for _, feat := range features {
if err := feat.TryToSetRequirement(); err != nil {
return err
}
}
}
// Check if the VMI has the EVMCS feature enabled. If so, the vmx CPU feature has to be added.
if spec.Domain.Features != nil && spec.Domain.Features.Hyperv != nil && spec.Domain.Features.Hyperv.EVMCS != nil &&
(spec.Domain.Features.Hyperv.EVMCS.Enabled == nil || (*spec.Domain.Features.Hyperv.EVMCS.Enabled)) {
setEVMCSDependency(spec)
}
return nil
}
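// setEVMCSDependency ensures the vmx CPU feature (with the policy from
// nodelabellerutil.RequirePolicy) is present in the spec, adding it or
// overriding a conflicting policy in place.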
func setEVMCSDependency(spec *v1.VirtualMachineInstanceSpec) {
vmxFeature := v1.CPUFeature{
Name: nodelabellerutil.VmxFeature,
Policy: nodelabellerutil.RequirePolicy,
}
cpuFeatures := []v1.CPUFeature{
vmxFeature,
}
if spec.Domain.CPU == nil {
spec.Domain.CPU = &v1.CPU{
Features: cpuFeatures,
}
return
}
if len(spec.Domain.CPU.Features) == 0 {
spec.Domain.CPU.Features = cpuFeatures
return
}
for _, requiredFeature := range cpuFeatures {
featureFound := false
for i, existingFeature := range spec.Domain.CPU.Features {
if existingFeature.Name == requiredFeature.Name {
featureFound = true
if existingFeature.Policy != requiredFeature.Policy {
spec.Domain.CPU.Features[i].Policy = requiredFeature.Policy
}
break
}
}
if !featureFound {
spec.Domain.CPU.Features = append(spec.Domain.CPU.Features, requiredFeature)
}
}
}
func getEVMCSDependency() v1.CPUFeature {
vmxFeature := v1.CPUFeature{
Name: nodelabellerutil.VmxFeature,
Policy: nodelabellerutil.RequirePolicy,
}
return vmxFeature
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package webhooks
import (
"fmt"
"slices"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/pkg/virt-config/featuregate"
)
// ValidateVirtualMachineInstanceS390XSetting is a validation function used by the validating webhook to filter unsupported settings on s390x
func ValidateVirtualMachineInstanceS390XSetting(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var statusCauses []metav1.StatusCause
validateWatchdogS390x(field, spec, &statusCauses)
validateVideoTypeS390x(field, spec, &statusCauses)
return statusCauses
}
func validateVideoTypeS390x(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, statusCauses *[]metav1.StatusCause) {
if spec.Domain.Devices.Video == nil {
return
}
videoType := spec.Domain.Devices.Video.Type
validTypes := []string{"virtio"}
if !slices.Contains(validTypes, videoType) {
*statusCauses = append(*statusCauses, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: fmt.Sprintf("video model '%s' is not supported on s390x architecture", videoType),
Field: field.Child("domain", "devices", "video").Child("type").String(),
})
}
}
func validateWatchdogS390x(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, statusCauses *[]metav1.StatusCause) {
watchdog := spec.Domain.Devices.Watchdog
if watchdog == nil {
return
}
if !isOnlyDiag288Watchdog(watchdog) {
*statusCauses = append(*statusCauses, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: "s390x only supports Diag288 watchdog device",
Field: field.Child("domain", "devices", "watchdog").String(),
})
}
}
func isOnlyDiag288Watchdog(watchdog *v1.Watchdog) bool {
return watchdog.WatchdogDevice.Diag288 != nil && watchdog.WatchdogDevice.I6300ESB == nil
}
func IsS390X(spec *v1.VirtualMachineInstanceSpec) bool {
return spec.Architecture == "s390x"
}
func ValidateLaunchSecurityS390x(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) []metav1.StatusCause {
var causes []metav1.StatusCause
if !config.SecureExecutionEnabled() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s feature gate is not enabled in kubevirt-config", featuregate.SecureExecution),
Field: field.Child("launchSecurity").String(),
})
}
return causes
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package webhooks
import (
"fmt"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/components"
)
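// KubeVirtServiceAccounts returns the set of fully qualified usernames of the
// KubeVirt control-plane service accounts in the given namespace, each of the
// form "system:serviceaccount:<namespace>:<serviceAccountName>", with the
// account names taken from the components package.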
func KubeVirtServiceAccounts(kubeVirtNamespace string) map[string]struct{} {
prefix := fmt.Sprintf("system:serviceaccount:%s", kubeVirtNamespace)
return map[string]struct{}{
fmt.Sprintf("%s:%s", prefix, components.ApiServiceAccountName): {},
fmt.Sprintf("%s:%s", prefix, components.ControllerServiceAccountName): {},
fmt.Sprintf("%s:%s", prefix, components.HandlerServiceAccountName): {},
fmt.Sprintf("%s:%s", prefix, components.SynchronizationControllerServiceAccountName): {},
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"context"
"encoding/json"
"fmt"
"strings"
admissionv1 "k8s.io/api/admission/v1"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubevirt"
webhookutils "kubevirt.io/kubevirt/pkg/util/webhooks"
"kubevirt.io/kubevirt/pkg/virt-api/webhooks"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/pkg/virt-config/featuregate"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/components"
)
type MigrationCreateAdmitter struct {
virtClient kubevirt.Interface
clusterConfig *virtconfig.ClusterConfig
kubeVirtServiceAccounts map[string]struct{}
}
func NewMigrationCreateAdmitter(virtClient kubevirt.Interface, clusterConfig *virtconfig.ClusterConfig, kubeVirtServiceAccounts map[string]struct{}) *MigrationCreateAdmitter {
return &MigrationCreateAdmitter{
virtClient: virtClient,
clusterConfig: clusterConfig,
kubeVirtServiceAccounts: kubeVirtServiceAccounts,
}
}
func isMigratable(vmi *v1.VirtualMachineInstance, migration *v1.VirtualMachineInstanceMigration) error {
for _, c := range vmi.Status.Conditions {
if c.Type == v1.VirtualMachineInstanceIsMigratable &&
c.Status == k8sv1.ConditionFalse {
// Allow cross namespace/cluster migrations with non-migratable disks.
// TODO: this is fragile since there could be other reasons for the VMI to be non-migratable.
if c.Reason == v1.VirtualMachineInstanceReasonDisksNotMigratable && migration.IsDecentralized() {
continue
}
return fmt.Errorf("Cannot migrate VMI, Reason: %s, Message: %s", c.Reason, c.Message)
}
}
return nil
}
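// ensureNoMigrationConflict lists the migrations whose v1.MigrationSelectorLabel
// matches the given VMI name and returns an error if any of them has not yet
// reached the Succeeded or Failed phase.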
func ensureNoMigrationConflict(ctx context.Context, virtClient kubevirt.Interface, vmiName string, namespace string) error {
labelSelector, err := labels.Parse(fmt.Sprintf("%s in (%s)", v1.MigrationSelectorLabel, vmiName))
if err != nil {
return err
}
list, err := virtClient.KubevirtV1().VirtualMachineInstanceMigrations(namespace).List(ctx, metav1.ListOptions{
LabelSelector: labelSelector.String(),
})
if err != nil {
return err
}
for _, mig := range list.Items {
if mig.Status.Phase == v1.MigrationSucceeded || mig.Status.Phase == v1.MigrationFailed {
continue
}
return fmt.Errorf("in-flight migration detected: active migration job (%s) is already in progress for VMI %s", string(mig.UID), mig.Spec.VMIName)
}
return nil
}
func (admitter *MigrationCreateAdmitter) Admit(ctx context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
migration, _, err := getAdmissionReviewMigration(ar)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
if resp := webhookutils.ValidateSchema(v1.VirtualMachineInstanceMigrationGroupVersionKind, ar.Request.Object.Raw); resp != nil {
return resp
}
causes := ValidateVirtualMachineInstanceMigrationSpec(k8sfield.NewPath("spec"), &migration.Spec)
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
if migration.Spec.Priority != nil {
if !admitter.clusterConfig.MigrationPriorityQueueEnabled() {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeForbidden,
Message: "MigrationPriorityQueue feature gate is not enabled in kubevirt resource",
Field: "spec.migrationPriority",
},
})
}
if !hasRequestOriginatedFromVirtController(ar.Request.UserInfo.Username, admitter.kubeVirtServiceAccounts) {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeForbidden,
Message: "Migration priority queue, only virt-controller is allowed to set priority field",
Field: "spec.migrationPriority",
},
})
}
}
vmi, err := admitter.virtClient.KubevirtV1().VirtualMachineInstances(migration.Namespace).Get(ctx, migration.Spec.VMIName, metav1.GetOptions{})
if errors.IsNotFound(err) {
// ensure VMI exists for the migration
return webhookutils.ToAdmissionResponseError(fmt.Errorf("the VMI \"%s/%s\" does not exist", migration.Namespace, migration.Spec.VMIName))
} else if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
// Don't allow introducing a migration job for a VMI that has already finalized
if vmi.IsFinal() {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("Cannot migrate VMI in finalized state."))
}
// Reject migration jobs for non-migratable VMIs
err = isMigratable(vmi, migration)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
// Don't allow new migration jobs to be introduced when previous migration jobs
// are already in flight.
err = ensureNoMigrationConflict(ctx, admitter.virtClient, migration.Spec.VMIName, migration.Namespace)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
if migration.Spec.SendTo != nil || migration.Spec.Receive != nil {
config := admitter.clusterConfig
// Ensure the feature gate is enabled before allowing.
if !config.DecentralizedLiveMigrationEnabled() {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: fmt.Sprintf("%s feature gate is not enabled in kubevirt resource", featuregate.DecentralizedLiveMigration),
}})
}
// If both sendTo and receive are set, make sure the migrationID matches in both.
if migration.Spec.SendTo != nil && migration.Spec.Receive != nil && migration.Spec.SendTo.MigrationID != migration.Spec.Receive.MigrationID {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("sendTo migrationID %s does not match receive migrationID %s", migration.Spec.SendTo.MigrationID, migration.Spec.Receive.MigrationID),
}})
}
}
reviewResponse := admissionv1.AdmissionResponse{}
reviewResponse.Allowed = true
return &reviewResponse
}
func getAdmissionReviewMigration(ar *admissionv1.AdmissionReview) (new *v1.VirtualMachineInstanceMigration, old *v1.VirtualMachineInstanceMigration, err error) {
if !webhookutils.ValidateRequestResource(ar.Request.Resource, webhooks.MigrationGroupVersionResource.Group, webhooks.MigrationGroupVersionResource.Resource) {
return nil, nil, fmt.Errorf("expect resource to be '%s'", webhooks.MigrationGroupVersionResource)
}
raw := ar.Request.Object.Raw
newMigration := v1.VirtualMachineInstanceMigration{}
err = json.Unmarshal(raw, &newMigration)
if err != nil {
return nil, nil, err
}
if ar.Request.Operation == admissionv1.Update {
raw := ar.Request.OldObject.Raw
oldMigration := v1.VirtualMachineInstanceMigration{}
err = json.Unmarshal(raw, &oldMigration)
if err != nil {
return nil, nil, err
}
return &newMigration, &oldMigration, nil
}
return &newMigration, nil, nil
}
func ValidateVirtualMachineInstanceMigrationSpec(field *k8sfield.Path, spec *v1.VirtualMachineInstanceMigrationSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.VMIName == "" {
return append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: fmt.Sprintf("vmiName is missing"),
Field: field.Child("vmiName").String(),
})
}
return causes
}
func hasRequestOriginatedFromVirtController(requestUsername string, kubeVirtServiceAccounts map[string]struct{}) bool {
if _, isKubeVirtServiceAccount := kubeVirtServiceAccounts[requestUsername]; isKubeVirtServiceAccount {
return strings.HasSuffix(requestUsername, components.ControllerServiceAccountName)
}
return false
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"context"
admissionv1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "kubevirt.io/api/core/v1"
webhookutils "kubevirt.io/kubevirt/pkg/util/webhooks"
validating_webhooks "kubevirt.io/kubevirt/pkg/util/webhooks/validating-webhooks"
)
type MigrationUpdateAdmitter struct {
}
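// ensureSelectorLabelSafe rejects removing or changing the
// v1.MigrationSelectorLabel on a migration that has not reached the
// Succeeded or Failed phase; that label is how in-flight migrations are
// looked up (see ensureNoMigrationConflict).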
func ensureSelectorLabelSafe(newMigration *v1.VirtualMachineInstanceMigration, oldMigration *v1.VirtualMachineInstanceMigration) []metav1.StatusCause {
if newMigration.Status.Phase != v1.MigrationSucceeded && newMigration.Status.Phase != v1.MigrationFailed && oldMigration.Labels != nil {
oldLabel, oldExists := oldMigration.Labels[v1.MigrationSelectorLabel]
if newMigration.Labels == nil {
if oldExists {
return []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: "selector label can't be removed from an in-flight migration",
},
}
}
} else {
newLabel, newExists := newMigration.Labels[v1.MigrationSelectorLabel]
if oldExists && (!newExists || newLabel != oldLabel) {
return []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: "selector label can't be modified on an in-flight migration",
},
}
}
}
}
return []metav1.StatusCause{}
}
func (admitter *MigrationUpdateAdmitter) Admit(_ context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
// Get the new and old migration objects from the admission review
newMigration, oldMigration, err := getAdmissionReviewMigration(ar)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
if resp := webhookutils.ValidateSchema(v1.VirtualMachineInstanceMigrationGroupVersionKind, ar.Request.Object.Raw); resp != nil {
return resp
}
// Reject Migration update if spec changed
if !equality.Semantic.DeepEqual(newMigration.Spec, oldMigration.Spec) {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: "update of Migration object's spec is restricted",
},
})
}
// Reject Migration update if selector label changed on an in-flight migration
causes := ensureSelectorLabelSafe(newMigration, oldMigration)
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
return validating_webhooks.NewPassingAdmissionResponse()
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"context"
"encoding/json"
"fmt"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
"kubevirt.io/api/migrations"
migrationsv1 "kubevirt.io/api/migrations/v1alpha1"
admissionv1 "k8s.io/api/admission/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
webhookutils "kubevirt.io/kubevirt/pkg/util/webhooks"
)
// MigrationPolicyAdmitter validates MigrationPolicies
type MigrationPolicyAdmitter struct {
}
// NewMigrationPolicyAdmitter creates a MigrationPolicyAdmitter
func NewMigrationPolicyAdmitter() *MigrationPolicyAdmitter {
return &MigrationPolicyAdmitter{}
}
// Admit validates an AdmissionReview
func (admitter *MigrationPolicyAdmitter) Admit(_ context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
if ar.Request.Resource.Group != migrationsv1.MigrationPolicyKind.Group ||
ar.Request.Resource.Resource != migrations.ResourceMigrationPolicies {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("unexpected resource %+v", ar.Request.Resource))
}
policy := &migrationsv1.MigrationPolicy{}
err := json.Unmarshal(ar.Request.Object.Raw, policy)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
var causes []metav1.StatusCause
sourceField := k8sfield.NewPath("spec")
spec := policy.Spec
if spec.CompletionTimeoutPerGiB != nil && *spec.CompletionTimeoutPerGiB < 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "must not be negative",
Field: sourceField.Child("completionTimeoutPerGiB").String(),
})
}
if spec.BandwidthPerMigration != nil {
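// Negative bandwidth is invalid. If the quantity does not fit into an int64,
// fall back to the sign of its decimal representation, which is all the
// comparison below needs.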
quantity, ok := spec.BandwidthPerMigration.AsInt64()
if !ok {
dec := spec.BandwidthPerMigration.AsDec()
quantity = int64(dec.Sign())
}
if quantity < 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "must not be negative",
Field: sourceField.Child("bandwidthPerMigration").String(),
})
}
}
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
reviewResponse := admissionv1.AdmissionResponse{
Allowed: true,
}
return &reviewResponse
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"context"
"encoding/json"
"fmt"
"net/http"
admissionv1 "k8s.io/api/admission/v1"
k8scorev1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
virtv1 "kubevirt.io/api/core/v1"
kubevirt "kubevirt.io/client-go/kubevirt"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/util/migrations"
validating_webhooks "kubevirt.io/kubevirt/pkg/util/webhooks/validating-webhooks"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
const (
// requestedByAnnotation is an annotation set by the descheduler on eviction
// requests; its value identifies the component the eviction request originated from.
requestedByAnnotation = "requested-by"
// requestedByDeschedulerValue is the value of the `requested-by` annotation set by the descheduler on the eviction requests
// Ref: https://github.com/kubernetes-sigs/descheduler/pull/1753
requestedByDeschedulerValue = "sigs.k8s.io/descheduler"
)
type PodEvictionAdmitter struct {
clusterConfig *virtconfig.ClusterConfig
kubeClient kubernetes.Interface
virtClient kubevirt.Interface
}
func NewPodEvictionAdmitter(clusterConfig *virtconfig.ClusterConfig, kubeClient kubernetes.Interface, virtClient kubevirt.Interface) *PodEvictionAdmitter {
return &PodEvictionAdmitter{
clusterConfig: clusterConfig,
kubeClient: kubeClient,
virtClient: virtClient,
}
}
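// isDryRun reports whether the eviction is a dry run, requested either on the
// AdmissionReview itself or via the Eviction's DeleteOptions.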
func isDryRun(ar *admissionv1.AdmissionReview, evictionObject *policyv1.Eviction) bool {
	dryRun := ar.Request.DryRun != nil && *ar.Request.DryRun
if !dryRun {
if evictionObject.DeleteOptions != nil && len(evictionObject.DeleteOptions.DryRun) > 0 {
dryRun = evictionObject.DeleteOptions.DryRun[0] == metav1.DryRunAll
}
}
return dryRun
}
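// isDeschedulerEviction reports whether the eviction request was issued by the
// descheduler. Such a request carries the requested-by annotation, roughly
// like this (illustrative sketch; names are placeholders):
//
//	apiVersion: policy/v1
//	kind: Eviction
//	metadata:
//	  name: virt-launcher-example
//	  namespace: default
//	  annotations:
//	    requested-by: sigs.k8s.io/descheduler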
func isDeschedulerEviction(evictionObject *policyv1.Eviction) bool {
if value, exists := evictionObject.GetAnnotations()[requestedByAnnotation]; exists && value == requestedByDeschedulerValue {
return true
}
return false
}
func (admitter *PodEvictionAdmitter) Admit(ctx context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
pod, err := admitter.kubeClient.CoreV1().Pods(ar.Request.Namespace).Get(ctx, ar.Request.Name, metav1.GetOptions{})
if err != nil {
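		// If the pod cannot be fetched (e.g. it is already gone), do not block
		// the eviction.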
return validating_webhooks.NewPassingAdmissionResponse()
}
switch {
case isHotplugPod(pod):
return admitter.admitHotplugPod(ctx, pod)
case isVirtLauncher(pod) && !isCompleted(pod):
return admitter.admitLauncherPod(ctx, ar, pod)
}
return validating_webhooks.NewPassingAdmissionResponse()
}
func (admitter *PodEvictionAdmitter) admitHotplugPod(ctx context.Context, pod *k8scorev1.Pod) *admissionv1.AdmissionResponse {
ownerPod, err := admitter.kubeClient.CoreV1().Pods(pod.Namespace).Get(ctx, pod.OwnerReferences[0].Name, metav1.GetOptions{})
if err != nil {
if !k8serrors.IsNotFound(err) {
return denied(fmt.Sprintf("failed getting owner for hotplug pod: %v", err))
}
return validating_webhooks.NewPassingAdmissionResponse()
}
if !isVirtLauncher(ownerPod) || isCompleted(ownerPod) {
return validating_webhooks.NewPassingAdmissionResponse()
}
vmiName, exists := ownerPod.GetAnnotations()[virtv1.DomainAnnotation]
if !exists {
return validating_webhooks.NewPassingAdmissionResponse()
}
_, err = admitter.virtClient.KubevirtV1().VirtualMachineInstances(pod.Namespace).Get(ctx, vmiName, metav1.GetOptions{})
if err != nil {
if !k8serrors.IsNotFound(err) {
return denied(fmt.Sprintf("kubevirt failed getting the vmi: %v", err))
}
return validating_webhooks.NewPassingAdmissionResponse()
}
return denied(fmt.Sprintf("cannot evict hotplug pod: %s associated with running vmi: %s in namespace %s", pod.Name, vmiName, pod.Namespace))
}
func (admitter *PodEvictionAdmitter) admitLauncherPod(ctx context.Context, ar *admissionv1.AdmissionReview, pod *k8scorev1.Pod) *admissionv1.AdmissionResponse {
vmiName, exists := pod.GetAnnotations()[virtv1.DomainAnnotation]
if !exists {
return validating_webhooks.NewPassingAdmissionResponse()
}
vmi, err := admitter.virtClient.KubevirtV1().VirtualMachineInstances(ar.Request.Namespace).Get(ctx, vmiName, metav1.GetOptions{})
if err != nil {
return denied(fmt.Sprintf("kubevirt failed getting the vmi: %v", err))
}
evictionStrategy := migrations.VMIEvictionStrategy(admitter.clusterConfig, vmi)
if evictionStrategy == nil {
// we don't act on VMIs without an eviction strategy
return validating_webhooks.NewPassingAdmissionResponse()
}
markForEviction := false
switch *evictionStrategy {
case virtv1.EvictionStrategyLiveMigrate:
if !vmi.IsMigratable() {
return denied(fmt.Sprintf("VMI %s is configured with an eviction strategy but is not live-migratable", vmi.Name))
}
markForEviction = true
case virtv1.EvictionStrategyLiveMigrateIfPossible:
if vmi.IsMigratable() {
markForEviction = true
}
case virtv1.EvictionStrategyExternal:
markForEviction = true
}
if !markForEviction {
return validating_webhooks.NewPassingAdmissionResponse()
}
// This message format is expected from descheduler.
const evictionFmt = "Eviction triggered evacuation of VMI \"%s/%s\""
if vmi.IsMarkedForEviction() {
return denied(fmt.Sprintf("Evacuation in progress: "+evictionFmt, vmi.Namespace, vmi.Name))
}
if vmi.Status.NodeName != pod.Spec.NodeName {
return denied("Eviction request for target Pod")
}
evictionObject := policyv1.Eviction{}
	if err := json.Unmarshal(ar.Request.Object.Raw, &evictionObject); err != nil {
		return denied(fmt.Sprintf("failed to parse the eviction object: %v", err))
	}
descEviction := isDeschedulerEviction(&evictionObject)
err = admitter.markVMI(ctx, vmi, pod.Spec.NodeName, isDryRun(ar, &evictionObject), descEviction)
if err != nil {
// As with the previous case, it is up to the user to issue a retry.
return denied(fmt.Sprintf("kubevirt failed marking the vmi for eviction: %v", err))
}
return denied(fmt.Sprintf(evictionFmt, vmi.Namespace, vmi.Name))
}
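// markVMI marks the VMI for evacuation by setting status.evacuationNodeName
// and, for descheduler-initiated evictions, records the eviction source in a
// VMI annotation.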
func (admitter *PodEvictionAdmitter) markVMI(ctx context.Context, vmi *virtv1.VirtualMachineInstance, nodeName string, dryRun, deschedulerEviction bool) error {
patchSet := patch.New(patch.WithAdd("/status/evacuationNodeName", nodeName))
if deschedulerEviction {
		if len(vmi.Annotations) == 0 {
			// Create the whole annotations map; the map key in the patch value
			// is a plain string and must not be JSON-Pointer escaped.
			patchSet.AddOption(patch.WithAdd("/metadata/annotations", map[string]string{virtv1.EvictionSourceAnnotation: "descheduler"}))
		} else {
			// JSON Patch "add" creates a missing member and overwrites an existing
			// one, whereas "replace" fails when the key is absent; only the path
			// segment needs JSON-Pointer escaping.
			patchSet.AddOption(patch.WithAdd(fmt.Sprintf("/metadata/annotations/%s", patch.EscapeJSONPointer(virtv1.EvictionSourceAnnotation)), "descheduler"))
		}
}
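	// The generated payload is a JSON Patch array along these lines
	// (illustrative sketch; <escaped-key> stands for the JSON-Pointer-escaped
	// EvictionSourceAnnotation):
	//
	//	[
	//	  {"op": "add", "path": "/status/evacuationNodeName", "value": "node01"},
	//	  {"op": "add", "path": "/metadata/annotations/<escaped-key>", "value": "descheduler"}
	//	]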
patchBytes, err := patchSet.GeneratePayload()
if err != nil {
return err
}
var patchOptions metav1.PatchOptions
if dryRun {
patchOptions.DryRun = []string{metav1.DryRunAll}
}
_, err = admitter.
virtClient.
KubevirtV1().
VirtualMachineInstances(vmi.Namespace).
Patch(ctx,
vmi.Name,
types.JSONPatchType,
patchBytes,
patchOptions,
)
return err
}
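// denied rejects the eviction with HTTP 429 (Too Many Requests), the status
// code the eviction API uses to signal "retry later", so callers such as the
// descheduler treat the denial as retryable.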
func denied(message string) *admissionv1.AdmissionResponse {
return &admissionv1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Message: message,
Code: http.StatusTooManyRequests,
},
}
}
func isVirtLauncher(pod *k8scorev1.Pod) bool {
return pod.Labels[virtv1.AppLabel] == "virt-launcher"
}
func isHotplugPod(pod *k8scorev1.Pod) bool {
return pod.Labels[virtv1.AppLabel] == "hotplug-disk" && len(pod.OwnerReferences) == 1
}
func isCompleted(pod *k8scorev1.Pod) bool {
return pod.Status.Phase == k8scorev1.PodFailed || pod.Status.Phase == k8scorev1.PodSucceeded
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"context"
admissionv1 "k8s.io/api/admission/v1"
webhooks2 "kubevirt.io/kubevirt/pkg/virt-api/webhooks"
"kubevirt.io/kubevirt/pkg/util/webhooks"
)
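// StatusAdmitter validates requests against status subresources and delegates
// VirtualMachine status updates to the wrapped VMsAdmitter.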
type StatusAdmitter struct {
VmsAdmitter *VMsAdmitter
}
func (s *StatusAdmitter) Admit(ctx context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
if resp := webhooks.ValidateStatus(ar.Request.Object.Raw); resp != nil {
return resp
}
if webhooks.ValidateRequestResource(ar.Request.Resource, webhooks2.VirtualMachineGroupVersionResource.Group, webhooks2.VirtualMachineGroupVersionResource.Resource) {
return s.VmsAdmitter.AdmitStatus(ctx, ar)
}
	reviewResponse := admissionv1.AdmissionResponse{
		Allowed: true,
	}
	return &reviewResponse
}
/*
Copyright 2014 The Kubernetes Authors.
Copyright The KubeVirt Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
The code here is taken from https://github.com/kubernetes/kubernetes/blob/29fb8e8b5a41b2a7d760190284bae7f2829312d3/pkg/apis/core/validation/validation.go#L3288
The internal "core" package is changed to the exported package "k8s.io/api/core/v1"
in order to avoid a dependency on kubernetes/kubernetes:
https://github.com/kubernetes/kubernetes/blame/29fb8e8b5a41b2a7d760190284bae7f2829312d3/pkg/apis/core/validation/validation.go#L3288
The code hardly ever changes (all of its changes are at least a few years old),
which makes it easier to copy and maintain here than to vendor in kubernetes/kubernetes
or to create dry runs of the Pod object during admission validation.
*/
package admitters
import (
"fmt"
core "k8s.io/api/core/v1"
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
)
const isNotPositiveErrorMsg string = `must be greater than zero`
// ValidateNamespaceName can be used to check whether the given namespace name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateNamespaceName = apimachineryvalidation.ValidateNamespaceName
// ValidateNodeName can be used to check whether the given node name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateNodeName = apimachineryvalidation.NameIsDNSSubdomain
var nodeFieldSelectorValidators = map[string]func(string, bool) []string{
metav1.ObjectNameField: ValidateNodeName,
}
// validateAffinity checks if given affinities are valid
func validateAffinity(affinity *core.Affinity, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if affinity != nil {
if affinity.NodeAffinity != nil {
allErrs = append(allErrs, validateNodeAffinity(affinity.NodeAffinity, fldPath.Child("nodeAffinity"))...)
}
if affinity.PodAffinity != nil {
allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, fldPath.Child("podAffinity"))...)
}
if affinity.PodAntiAffinity != nil {
allErrs = append(allErrs, validatePodAntiAffinity(affinity.PodAntiAffinity, fldPath.Child("podAntiAffinity"))...)
}
}
return allErrs
}
// validateNodeAffinity tests that the specified nodeAffinity fields have valid data
func validateNodeAffinity(na *core.NodeAffinity, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented.
// if na.RequiredDuringSchedulingRequiredDuringExecution != nil {
// allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
// }
if na.RequiredDuringSchedulingIgnoredDuringExecution != nil {
allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
}
if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
}
return allErrs
}
// ValidatePreferredSchedulingTerms tests that the specified SoftNodeAffinity fields has valid data
func ValidatePreferredSchedulingTerms(terms []core.PreferredSchedulingTerm, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for i, term := range terms {
if term.Weight <= 0 || term.Weight > 100 {
allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("weight"), term.Weight, "must be in the range 1-100"))
}
allErrs = append(allErrs, ValidateNodeSelectorTerm(term.Preference, fldPath.Index(i).Child("preference"))...)
}
return allErrs
}
// validatePodAffinity tests that the specified podAffinity fields have valid data
func validatePodAffinity(podAffinity *core.PodAffinity, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented.
// if podAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
// allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingRequiredDuringExecution, false,
// fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
//}
if podAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
}
if podAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
}
return allErrs
}
// validateWeightedPodAffinityTerms tests that the specified weightedPodAffinityTerms fields have valid data
func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []core.WeightedPodAffinityTerm, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for j, weightedTerm := range weightedPodAffinityTerms {
if weightedTerm.Weight <= 0 || weightedTerm.Weight > 100 {
allErrs = append(allErrs, field.Invalid(fldPath.Index(j).Child("weight"), weightedTerm.Weight, "must be in the range 1-100"))
}
allErrs = append(allErrs, validatePodAffinityTerm(weightedTerm.PodAffinityTerm, fldPath.Index(j).Child("podAffinityTerm"))...)
}
return allErrs
}
// validatePodAffinityTerm tests that the specified podAffinityTerm fields have valid data
func validatePodAffinityTerm(podAffinityTerm core.PodAffinityTerm, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, unversionedvalidation.LabelSelectorValidationOptions{}, fldPath.Child("labelSelector"))...)
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.NamespaceSelector, unversionedvalidation.LabelSelectorValidationOptions{}, fldPath.Child("namespaceSelector"))...)
for _, name := range podAffinityTerm.Namespaces {
for _, msg := range ValidateNamespaceName(name, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), name, msg))
}
}
if len(podAffinityTerm.TopologyKey) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("topologyKey"), "can not be empty"))
}
return append(allErrs, unversionedvalidation.ValidateLabelName(podAffinityTerm.TopologyKey, fldPath.Child("topologyKey"))...)
}
// validatePodAntiAffinity tests that the specified podAntiAffinity fields have valid data
func validatePodAntiAffinity(podAntiAffinity *core.PodAntiAffinity, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented.
// if podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
// allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution, false,
// fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
//}
if podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
}
if podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
}
return allErrs
}
// ValidateNodeSelector tests that the specified nodeSelector fields has valid data
func ValidateNodeSelector(nodeSelector *core.NodeSelector, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
termFldPath := fldPath.Child("nodeSelectorTerms")
if len(nodeSelector.NodeSelectorTerms) == 0 {
return append(allErrs, field.Required(termFldPath, "must have at least one node selector term"))
}
for i, term := range nodeSelector.NodeSelectorTerms {
allErrs = append(allErrs, ValidateNodeSelectorTerm(term, termFldPath.Index(i))...)
}
return allErrs
}
// validatePodAffinityTerms tests that the specified podAffinityTerms fields have valid data
func validatePodAffinityTerms(podAffinityTerms []core.PodAffinityTerm, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for i, podAffinityTerm := range podAffinityTerms {
allErrs = append(allErrs, validatePodAffinityTerm(podAffinityTerm, fldPath.Index(i))...)
}
return allErrs
}
// ValidateNodeSelectorTerm tests that the specified node selector term has valid data
func ValidateNodeSelectorTerm(term core.NodeSelectorTerm, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for j, req := range term.MatchExpressions {
allErrs = append(allErrs, ValidateNodeSelectorRequirement(req, fldPath.Child("matchExpressions").Index(j))...)
}
for j, req := range term.MatchFields {
allErrs = append(allErrs, ValidateNodeFieldSelectorRequirement(req, fldPath.Child("matchFields").Index(j))...)
}
return allErrs
}
// ValidateNodeSelectorRequirement tests that the specified NodeSelectorRequirement fields has valid data
func ValidateNodeSelectorRequirement(rq core.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
switch rq.Operator {
case core.NodeSelectorOpIn, core.NodeSelectorOpNotIn:
if len(rq.Values) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'"))
}
case core.NodeSelectorOpExists, core.NodeSelectorOpDoesNotExist:
if len(rq.Values) > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'"))
}
case core.NodeSelectorOpGt, core.NodeSelectorOpLt:
if len(rq.Values) != 1 {
allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified single value when `operator` is 'Lt' or 'Gt'"))
}
default:
allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), rq.Operator, "not a valid selector operator"))
}
allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(rq.Key, fldPath.Child("key"))...)
return allErrs
}
// ValidateNodeFieldSelectorRequirement tests that the specified NodeSelectorRequirement fields has valid data
func ValidateNodeFieldSelectorRequirement(req core.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
switch req.Operator {
case core.NodeSelectorOpIn, core.NodeSelectorOpNotIn:
if len(req.Values) != 1 {
allErrs = append(allErrs, field.Required(fldPath.Child("values"),
"must be only one value when `operator` is 'In' or 'NotIn' for node field selector"))
}
default:
allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), req.Operator, "not a valid selector operator"))
}
if vf, found := nodeFieldSelectorValidators[req.Key]; !found {
allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), req.Key, "not a valid field selector key"))
} else {
for i, v := range req.Values {
for _, msg := range vf(v, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("values").Index(i), v, msg))
}
}
}
return allErrs
}
var (
supportedScheduleActions = sets.NewString(string(core.DoNotSchedule), string(core.ScheduleAnyway))
)
// validateTopologySpreadConstraints validates given TopologySpreadConstraints.
func validateTopologySpreadConstraints(constraints []core.TopologySpreadConstraint, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for i, constraint := range constraints {
subFldPath := fldPath.Index(i)
if err := ValidateMaxSkew(subFldPath.Child("maxSkew"), constraint.MaxSkew); err != nil {
allErrs = append(allErrs, err)
}
if errs := ValidateTopologyKey(subFldPath.Child("topologyKey"), constraint.TopologyKey); errs != nil {
allErrs = append(allErrs, errs...)
}
if err := ValidateWhenUnsatisfiable(subFldPath.Child("whenUnsatisfiable"), constraint.WhenUnsatisfiable); err != nil {
allErrs = append(allErrs, err)
}
// this is missing in upstream codebase https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/core/validation/validation.go#L6571-L6600
// issue captured here https://github.com/kubernetes/kubernetes/issues/111791#issuecomment-1211184962
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(constraint.LabelSelector, unversionedvalidation.LabelSelectorValidationOptions{}, fldPath.Child("labelSelector"))...)
// tuple {topologyKey, whenUnsatisfiable} denotes one kind of spread constraint
if err := ValidateSpreadConstraintNotRepeat(subFldPath.Child("{topologyKey, whenUnsatisfiable}"), constraint, constraints[i+1:]); err != nil {
allErrs = append(allErrs, err)
}
}
return allErrs
}
// ValidateMaxSkew tests that the argument is a valid MaxSkew.
func ValidateMaxSkew(fldPath *field.Path, maxSkew int32) *field.Error {
if maxSkew <= 0 {
return field.Invalid(fldPath, maxSkew, isNotPositiveErrorMsg)
}
return nil
}
// ValidateTopologyKey tests that the argument is a valid TopologyKey.
func ValidateTopologyKey(fldPath *field.Path, topologyKey string) field.ErrorList {
allErrs := field.ErrorList{}
if len(topologyKey) == 0 {
return append(allErrs, field.Required(fldPath, "can not be empty"))
}
return unversionedvalidation.ValidateLabelName(topologyKey, fldPath)
}
// ValidateWhenUnsatisfiable tests that the argument is a valid UnsatisfiableConstraintAction.
func ValidateWhenUnsatisfiable(fldPath *field.Path, action core.UnsatisfiableConstraintAction) *field.Error {
if !supportedScheduleActions.Has(string(action)) {
return field.NotSupported(fldPath, action, supportedScheduleActions.List())
}
return nil
}
// ValidateSpreadConstraintNotRepeat tests whether `constraint` duplicates any of
// `restingConstraints` on the TopologyKey and WhenUnsatisfiable fields.
func ValidateSpreadConstraintNotRepeat(fldPath *field.Path, constraint core.TopologySpreadConstraint, restingConstraints []core.TopologySpreadConstraint) *field.Error {
for _, restingConstraint := range restingConstraints {
if constraint.TopologyKey == restingConstraint.TopologyKey &&
constraint.WhenUnsatisfiable == restingConstraint.WhenUnsatisfiable {
return field.Duplicate(fldPath, fmt.Sprintf("{%v, %v}", constraint.TopologyKey, constraint.WhenUnsatisfiable))
}
}
return nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"context"
"encoding/json"
"fmt"
"strings"
"kubevirt.io/kubevirt/pkg/network/link"
admissionv1 "k8s.io/api/admission/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
clonebase "kubevirt.io/api/clone"
clone "kubevirt.io/api/clone/v1beta1"
"kubevirt.io/client-go/kubecli"
webhookutils "kubevirt.io/kubevirt/pkg/util/webhooks"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
const (
virtualMachineKind = "VirtualMachine"
virtualMachineSnapshotKind = "VirtualMachineSnapshot"
)
// VirtualMachineCloneAdmitter validates VirtualMachineClones
type VirtualMachineCloneAdmitter struct {
Config *virtconfig.ClusterConfig
Client kubecli.KubevirtClient
}
// NewVMCloneAdmitter creates a VM Clone Admitter
func NewVMCloneAdmitter(config *virtconfig.ClusterConfig, client kubecli.KubevirtClient) *VirtualMachineCloneAdmitter {
return &VirtualMachineCloneAdmitter{
Config: config,
Client: client,
}
}
// Admit validates an AdmissionReview
func (admitter *VirtualMachineCloneAdmitter) Admit(ctx context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
if ar.Request.Resource.Group != clone.VirtualMachineCloneKind.Group {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("unexpected group: %+v. Expected group: %+v", ar.Request.Resource.Group, clone.VirtualMachineCloneKind.Group))
}
if ar.Request.Resource.Resource != clonebase.ResourceVMClonePlural {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("unexpected resource: %+v. Expected resource: %+v", ar.Request.Resource.Resource, clonebase.ResourceVMClonePlural))
}
if ar.Request.Operation == admissionv1.Create && !admitter.Config.SnapshotEnabled() {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("snapshot feature gate is not enabled"))
}
vmClone := &clone.VirtualMachineClone{}
err := json.Unmarshal(ar.Request.Object.Raw, vmClone)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
var causes []metav1.StatusCause
if newCauses := validateFilters(vmClone.Spec.AnnotationFilters, "spec.annotations"); newCauses != nil {
causes = append(causes, newCauses...)
}
if newCauses := validateFilters(vmClone.Spec.LabelFilters, "spec.labels"); newCauses != nil {
causes = append(causes, newCauses...)
}
if newCauses := validateFilters(vmClone.Spec.Template.AnnotationFilters, "spec.template.annotations"); newCauses != nil {
causes = append(causes, newCauses...)
}
if newCauses := validateFilters(vmClone.Spec.Template.LabelFilters, "spec.template.labels"); newCauses != nil {
causes = append(causes, newCauses...)
}
if newCauses := validateSourceAndTargetKind(vmClone); newCauses != nil {
causes = append(causes, newCauses...)
}
if newCauses := validateSource(ctx, admitter.Client, vmClone); newCauses != nil {
causes = append(causes, newCauses...)
}
if newCauses := validateTarget(vmClone); newCauses != nil {
causes = append(causes, newCauses...)
}
if newCauses := validateNewMacAddresses(vmClone); newCauses != nil {
causes = append(causes, newCauses...)
}
if newCauses := validatePatches(vmClone); newCauses != nil {
causes = append(causes, newCauses...)
}
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
reviewResponse := admissionv1.AdmissionResponse{
Allowed: true,
}
return &reviewResponse
}
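// validateFilters checks the clone filter syntax: a filter may start with a
// negation character ("!") and may end with a wildcard ("*"); the negation
// character is only valid at the beginning of a filter and the wildcard only
// at its end. For example (illustrative): "some/label*" and "!some/label" are
// valid, while "some!label" and "some*label" are not.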
func validateFilters(filters []string, fieldName string) (causes []metav1.StatusCause) {
if filters == nil {
return nil
}
addCause := func(message string) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: message,
Field: fieldName,
})
}
const negationChar = "!"
const wildcardChar = "*"
for _, filter := range filters {
if len(filter) == 1 {
if filter == negationChar {
addCause("a negation character is not a valid filter")
}
continue
}
const errPattern = "%s filter %s is invalid: cannot contain a %s character (%s); FilterRules: %s"
if filterWithoutFirstChar := filter[1:]; strings.Contains(filterWithoutFirstChar, negationChar) {
addCause(fmt.Sprintf(errPattern, fieldName, filter, "negation", negationChar, "NegationChar can be only used at the beginning of the filter"))
}
if filterWithoutLastChar := filter[:len(filter)-1]; strings.Contains(filterWithoutLastChar, wildcardChar) {
addCause(fmt.Sprintf(errPattern, fieldName, filter, "wildcard", wildcardChar, "WildcardChar can be only at the end of the filter"))
}
}
return causes
}
func validateSourceAndTargetKind(vmClone *clone.VirtualMachineClone) []metav1.StatusCause {
	var causes []metav1.StatusCause
sourceField := k8sfield.NewPath("spec")
supportedSourceTypes := []string{virtualMachineKind, virtualMachineSnapshotKind}
supportedTargetTypes := []string{virtualMachineKind}
if !doesSliceContainStr(supportedSourceTypes, vmClone.Spec.Source.Kind) {
causes = []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Source kind is not supported",
Field: sourceField.Child("Source").Child("Kind").String(),
}}
}
	if vmClone.Spec.Target != nil && !doesSliceContainStr(supportedTargetTypes, vmClone.Spec.Target.Kind) {
		// append handles a nil slice, so no explicit initialization is needed.
		causes = append(causes, metav1.StatusCause{
			Type:    metav1.CauseTypeFieldValueInvalid,
			Message: "Target kind is not supported",
			Field:   sourceField.Child("Target").Child("Kind").String(),
		})
	}
return causes
}
func validateSource(ctx context.Context, client kubecli.KubevirtClient, vmClone *clone.VirtualMachineClone) []metav1.StatusCause {
	var causes []metav1.StatusCause
sourceField := k8sfield.NewPath("spec")
source := vmClone.Spec.Source
if source == nil {
causes = []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Source cannot be nil",
Field: sourceField.Child("Source").String(),
}}
return causes
}
if source.APIGroup == nil || *source.APIGroup == "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Source's APIGroup cannot be empty",
Field: sourceField.Child("Source").Child("APIGroup").String(),
})
}
if source.Kind == "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Source's Kind cannot be empty",
Field: sourceField.Child("Source").String(),
})
}
if source.Name == "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Source's name cannot be empty",
Field: sourceField.Child("Source").Child("Name").String(),
})
}
return causes
}
func validateTarget(vmClone *clone.VirtualMachineClone) []metav1.StatusCause {
var causes []metav1.StatusCause
source := vmClone.Spec.Source
target := vmClone.Spec.Target
if source != nil &&
target != nil &&
source.Kind == virtualMachineKind &&
target.Kind == virtualMachineKind &&
target.Name == source.Name {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Target name cannot be equal to source name when both are VirtualMachines",
Field: k8sfield.NewPath("spec").Child("target").Child("name").String(),
})
}
return causes
}
func validateNewMacAddresses(vmClone *clone.VirtualMachineClone) []metav1.StatusCause {
var causes []metav1.StatusCause
for ifaceName, ifaceMac := range vmClone.Spec.NewMacAddresses {
if ifaceMac != "" {
if err := link.ValidateMacAddress(ifaceMac); err != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("interface %s has malformed MAC address (%s).", ifaceName, ifaceMac),
Field: k8sfield.NewPath("spec").Child("newMacAddresses").Child(ifaceName).String(),
})
}
}
}
return causes
}
func validatePatches(vmClone *clone.VirtualMachineClone) []metav1.StatusCause {
var causes []metav1.StatusCause
for i, patch := range vmClone.Spec.Patches {
if !json.Valid([]byte(patch)) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("patch is not valid JSON (%s)", patch),
Field: k8sfield.NewPath("spec").Child("patches").Index(i).String(),
})
}
}
return causes
}
func doesSliceContainStr(slice []string, str string) bool {
	for _, curSliceStr := range slice {
		if curSliceStr == str {
			return true
		}
	}
	return false
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"context"
"encoding/base64"
"fmt"
"net"
"path/filepath"
"slices"
"strings"
admissionv1 "k8s.io/api/admission/v1"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/downwardmetrics"
draadmitter "kubevirt.io/kubevirt/pkg/dra/admitter"
"kubevirt.io/kubevirt/pkg/hooks"
netadmitter "kubevirt.io/kubevirt/pkg/network/admitter"
"kubevirt.io/kubevirt/pkg/network/vmispec"
storageadmitters "kubevirt.io/kubevirt/pkg/storage/admitters"
"kubevirt.io/kubevirt/pkg/storage/reservation"
storagetypes "kubevirt.io/kubevirt/pkg/storage/types"
hwutil "kubevirt.io/kubevirt/pkg/util/hardware"
webhookutils "kubevirt.io/kubevirt/pkg/util/webhooks"
"kubevirt.io/kubevirt/pkg/virt-api/webhooks"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/pkg/virt-config/featuregate"
)
const requiredFieldFmt = "%s is a required field"
const (
	// cloudInitUserMaxLen and cloudInitNetworkMaxLen are limited to 2K to keep
	// the config scalable: edits cause the entire object to be redistributed to
	// a large number of nodes. For data larger than 2K, users should use
	// NetworkDataSecretRef and UserDataSecretRef instead.
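	//
	// For data that exceeds the limit, reference a Secret instead of inlining
	// it (illustrative VMI volume sketch; the secret names are placeholders):
	//
	//	volumes:
	//	- name: cloudinitdisk
	//	  cloudInitNoCloud:
	//	    userDataSecretRef:
	//	      name: my-userdata-secret
	//	    networkDataSecretRef:
	//	      name: my-networkdata-secret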
cloudInitUserMaxLen = 2048
cloudInitNetworkMaxLen = 2048
// Copied from kubernetes/pkg/apis/core/validation/validation.go
maxDNSNameservers = 3
maxDNSSearchPaths = 6
maxDNSSearchListChars = 256
)
var validIOThreadsPolicies = []v1.IOThreadsPolicy{v1.IOThreadsPolicyShared, v1.IOThreadsPolicyAuto, v1.IOThreadsPolicySupplementalPool}
var validCPUFeaturePolicies = map[string]struct{}{"": {}, "force": {}, "require": {}, "optional": {}, "disable": {}, "forbid": {}}
var validPanicDeviceModels = []v1.PanicDeviceModel{v1.Hyperv, v1.Isa, v1.Pvpanic}
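// restrictedVmiLabels are KubeVirt-managed labels that only KubeVirt's own
// service accounts may set or modify on a VMI.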
var restrictedVmiLabels = map[string]bool{
v1.CreatedByLabel: true,
v1.MigrationJobLabel: true,
v1.NodeNameLabel: true,
v1.MigrationTargetNodeNameLabel: true,
v1.NodeSchedulable: true,
v1.InstallStrategyLabel: true,
}
const (
nameOfTypeNotFoundMessagePattern = "%s '%s' not found."
valueMustBePositiveMessagePattern = "%s '%s': must be greater than or equal to 0."
)
var invalidPanicDeviceModelErrFmt = "invalid PanicDeviceModel(%s)"
// SpecValidator validates the given VMI spec
type SpecValidator func(*k8sfield.Path, *v1.VirtualMachineInstanceSpec, *virtconfig.ClusterConfig) []metav1.StatusCause
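// VMICreateAdmitter validates VirtualMachineInstance objects on creation.
// KubeVirtServiceAccounts is consulted to relax validation of restricted
// labels for requests made by KubeVirt's own service accounts.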
type VMICreateAdmitter struct {
ClusterConfig *virtconfig.ClusterConfig
SpecValidators []SpecValidator
KubeVirtServiceAccounts map[string]struct{}
}
func (admitter *VMICreateAdmitter) Admit(_ context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
if resp := webhookutils.ValidateSchema(v1.VirtualMachineInstanceGroupVersionKind, ar.Request.Object.Raw); resp != nil {
return resp
}
vmi, _, err := webhookutils.GetVMIFromAdmissionReview(ar)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
var causes []metav1.StatusCause
clusterCfg := admitter.ClusterConfig.GetConfig()
if devCfg := clusterCfg.DeveloperConfiguration; devCfg != nil {
causes = append(causes, featuregate.ValidateFeatureGates(devCfg.FeatureGates, &vmi.Spec)...)
}
for _, validateSpec := range admitter.SpecValidators {
causes = append(causes, validateSpec(k8sfield.NewPath("spec"), &vmi.Spec, admitter.ClusterConfig)...)
}
causes = append(causes, ValidateVirtualMachineInstanceSpec(k8sfield.NewPath("spec"), &vmi.Spec, admitter.ClusterConfig)...)
	// We only want to validate that volumes are mapped to disks or filesystems
	// during VMI admittance, so this logic is separated from the call above,
	// which is shared with the VM admitter.
causes = append(causes, validateVirtualMachineInstanceSpecVolumeDisks(k8sfield.NewPath("spec"), &vmi.Spec)...)
causes = append(causes, ValidateVirtualMachineInstanceMandatoryFields(k8sfield.NewPath("spec"), &vmi.Spec)...)
_, isKubeVirtServiceAccount := admitter.KubeVirtServiceAccounts[ar.Request.UserInfo.Username]
causes = append(causes, ValidateVirtualMachineInstanceMetadata(k8sfield.NewPath("metadata"), &vmi.ObjectMeta, admitter.ClusterConfig, isKubeVirtServiceAccount)...)
causes = append(causes, webhooks.ValidateVirtualMachineInstanceHyperv(k8sfield.NewPath("spec").Child("domain").Child("features").Child("hyperv"), &vmi.Spec)...)
causes = append(causes, ValidateVirtualMachineInstancePerArch(k8sfield.NewPath("spec"), &vmi.Spec)...)
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
return &admissionv1.AdmissionResponse{
Allowed: true,
Warnings: warnDeprecatedAPIs(&vmi.Spec, admitter.ClusterConfig),
}
}
func warnDeprecatedAPIs(spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) []string {
	var warnings []string
	// Guard against a nil DeveloperConfiguration, mirroring the check in Admit.
	devCfg := config.GetConfig().DeveloperConfiguration
	if devCfg == nil {
		return warnings
	}
	for _, fg := range devCfg.FeatureGates {
deprecatedFeature := featuregate.FeatureGateInfo(fg)
if deprecatedFeature != nil && deprecatedFeature.State == featuregate.Deprecated && deprecatedFeature.VmiSpecUsed != nil {
if used := deprecatedFeature.VmiSpecUsed(spec); used {
warnings = append(warnings, deprecatedFeature.Message)
}
}
}
return warnings
}
func ValidateVirtualMachineInstancePerArch(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
arch := spec.Architecture
switch arch {
case "amd64":
causes = append(causes, webhooks.ValidateVirtualMachineInstanceAmd64Setting(field, spec)...)
case "s390x":
causes = append(causes, webhooks.ValidateVirtualMachineInstanceS390XSetting(field, spec)...)
case "arm64":
causes = append(causes, webhooks.ValidateVirtualMachineInstanceArm64Setting(field, spec)...)
default:
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("unsupported architecture: %s", arch),
Field: field.Child("architecture").String(),
})
}
return causes
}
func ValidateVirtualMachineInstanceSpec(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) []metav1.StatusCause {
var causes []metav1.StatusCause
causes = append(causes, validateHostNameNotConformingToDNSLabelRules(field, spec)...)
causes = append(causes, validateSubdomainDNSSubdomainRules(field, spec)...)
causes = append(causes, validateMemoryRequestsNegativeOrNull(field, spec)...)
causes = append(causes, validateMemoryLimitsNegativeOrNull(field, spec)...)
causes = append(causes, validateHugepagesMemoryRequests(field, spec)...)
causes = append(causes, validateGuestMemoryLimit(field, spec, config)...)
causes = append(causes, validateEmulatedMachine(field, spec, config)...)
causes = append(causes, validateFirmwareACPI(field.Child("acpi"), spec)...)
causes = append(causes, validateCPURequestNotNegative(field, spec)...)
causes = append(causes, validateCPULimitNotNegative(field, spec)...)
causes = append(causes, validateCpuRequestDoesNotExceedLimit(field, spec)...)
causes = append(causes, validateCpuPinning(field, spec, config)...)
causes = append(causes, validateNUMA(field, spec, config)...)
causes = append(causes, validateCPUIsolatorThread(field, spec)...)
causes = append(causes, validateCPUFeaturePolicies(field, spec)...)
causes = append(causes, validateCPUHotplug(field, spec)...)
causes = append(causes, validateStartStrategy(field, spec)...)
causes = append(causes, validateRealtime(field, spec)...)
causes = append(causes, validateSpecAffinity(field, spec)...)
causes = append(causes, validateSpecTopologySpreadConstraints(field, spec)...)
netValidator := netadmitter.NewValidator(field, spec, config)
causes = append(causes, netValidator.Validate()...)
causes = append(causes, draadmitter.ValidateCreation(field, spec, config)...)
causes = append(causes, validateBootOrder(field, spec, config)...)
causes = append(causes, validateInputDevices(field, spec)...)
causes = append(causes, validateIOThreadsPolicy(field, spec)...)
causes = append(causes, validateProbe(field.Child("readinessProbe"), spec.ReadinessProbe)...)
causes = append(causes, validateProbe(field.Child("livenessProbe"), spec.LivenessProbe)...)
if podNetwork := vmispec.LookupPodNetwork(spec.Networks); podNetwork == nil {
causes = appendStatusCauseForProbeNotAllowedWithNoPodNetworkPresent(field.Child("readinessProbe"), spec.ReadinessProbe, causes)
causes = appendStatusCauseForProbeNotAllowedWithNoPodNetworkPresent(field.Child("livenessProbe"), spec.LivenessProbe, causes)
}
causes = append(causes, validateDomainSpec(field.Child("domain"), &spec.Domain)...)
causes = append(causes, validateVolumes(field.Child("volumes"), spec.Volumes, config)...)
causes = append(causes, storageadmitters.ValidateContainerDisks(field, spec)...)
causes = append(causes, storageadmitters.ValidateUtilityVolumesNotPresentOnCreation(field, spec)...)
causes = append(causes, validateAccessCredentials(field.Child("accessCredentials"), spec.AccessCredentials, spec.Volumes)...)
if spec.DNSPolicy != "" {
causes = append(causes, validateDNSPolicy(&spec.DNSPolicy, field.Child("dnsPolicy"))...)
}
causes = append(causes, validatePodDNSConfig(spec.DNSConfig, &spec.DNSPolicy, field.Child("dnsConfig"))...)
causes = append(causes, validateLiveMigration(field, spec, config)...)
causes = append(causes, validateMDEVRamFB(field, spec)...)
causes = append(causes, validateHostDevicesWithPassthroughEnabled(field, spec, config)...)
causes = append(causes, validateSoundDevices(field, spec)...)
causes = append(causes, validateLaunchSecurity(field, spec, config)...)
causes = append(causes, validateVSOCK(field, spec, config)...)
causes = append(causes, validatePersistentReservation(field, spec, config)...)
causes = append(causes, validateDownwardMetrics(field, spec, config)...)
causes = append(causes, validateFilesystemsWithVirtIOFSEnabled(field, spec, config)...)
causes = append(causes, validateVideoConfig(field, spec, config)...)
causes = append(causes, validatePanicDevices(field, spec, config)...)
return causes
}
func validateFilesystemsWithVirtIOFSEnabled(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) (causes []metav1.StatusCause) {
if spec.Domain.Devices.Filesystems == nil {
return causes
}
volumes := storagetypes.GetVolumesByName(spec)
for _, fs := range spec.Domain.Devices.Filesystems {
volume, ok := volumes[fs.Name]
if !ok {
continue
}
		if storagetypes.IsStorageVolume(volume) && !config.VirtiofsStorageEnabled() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "virtiofs is not allowed: virtiofs feature gate is not enabled for PVC",
Field: field.Child("domain", "devices", "filesystems").String(),
})
}
}
return causes
}
func validateDownwardMetrics(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) []metav1.StatusCause {
var causes []metav1.StatusCause
	// Reject the downwardMetrics virtio-serial device when the DownwardMetrics feature gate is disabled.
if downwardmetrics.HasDevice(spec) && !config.DownwardMetricsEnabled() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "downwardMetrics virtio serial is not allowed: DownwardMetrics feature gate is not enabled",
Field: field.Child("domain", "devices", "downwardMetrics").String(),
})
}
return causes
}
func validateVirtualMachineInstanceSpecVolumeDisks(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
diskAndFilesystemNames := make(map[string]struct{})
for _, disk := range spec.Domain.Devices.Disks {
diskAndFilesystemNames[disk.Name] = struct{}{}
}
for _, fs := range spec.Domain.Devices.Filesystems {
diskAndFilesystemNames[fs.Name] = struct{}{}
}
// Validate that volumes match disks and filesystems correctly
for idx, volume := range spec.Volumes {
if volume.MemoryDump != nil {
continue
}
if _, matchingDiskExists := diskAndFilesystemNames[volume.Name]; !matchingDiskExists {
			causes = append(causes, metav1.StatusCause{
				Type:    metav1.CauseTypeFieldValueInvalid,
				Message: fmt.Sprintf(nameOfTypeNotFoundMessagePattern, field.Child("volumes").Index(idx).Child("name").String(), volume.Name),
				Field:   field.Child("volumes").Index(idx).Child("name").String(),
			})
}
}
return causes
}
func validateInterfaceBootOrder(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, bootOrderMap map[uint]bool) (causes []metav1.StatusCause) {
for idx, iface := range spec.Domain.Devices.Interfaces {
if iface.BootOrder != nil {
order := *iface.BootOrder
// Verify boot order is greater than 0, if provided
if order < 1 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must have a boot order > 0, if supplied", field.Index(idx).String()),
Field: field.Index(idx).Child("bootOrder").String(),
})
} else {
// verify that there are no duplicate boot orders
if bootOrderMap[order] {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Boot order for %s already set for a different device.", field.Child("domain", "devices", "interfaces").Index(idx).Child("bootOrder").String()),
Field: field.Child("domain", "devices", "interfaces").Index(idx).Child("bootOrder").String(),
})
}
bootOrderMap[order] = true
}
}
}
return causes
}
func validateInputDevices(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) (causes []metav1.StatusCause) {
for idx, input := range spec.Domain.Devices.Inputs {
if input.Bus != v1.InputBusVirtio && input.Bus != v1.InputBusUSB && input.Bus != "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Input device can have only virtio or usb bus.",
Field: field.Child("domain", "devices", "inputs").Index(idx).Child("bus").String(),
})
}
if input.Type != v1.InputTypeTablet {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Input device can have only tablet type.",
Field: field.Child("domain", "devices", "inputs").Index(idx).Child("type").String(),
})
}
}
return causes
}
func validateIOThreadsPolicy(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.IOThreadsPolicy == nil {
return causes
}
isValidPolicy := func(policy v1.IOThreadsPolicy) bool {
for _, p := range validIOThreadsPolicies {
if policy == p {
return true
}
}
return false
}
if !isValidPolicy(*spec.Domain.IOThreadsPolicy) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Invalid IOThreadsPolicy (%s)", *spec.Domain.IOThreadsPolicy),
Field: field.Child("domain", "ioThreadsPolicy").String(),
})
}
if *spec.Domain.IOThreadsPolicy == v1.IOThreadsPolicySupplementalPool &&
(spec.Domain.IOThreads == nil || spec.Domain.IOThreads.SupplementalPoolThreadCount == nil ||
*spec.Domain.IOThreads.SupplementalPoolThreadCount < 1) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "the number of iothreads needs to be set and positive for the dedicated policy",
Field: field.Child("domain", "ioThreads", "count").String(),
})
}
return causes
}
func validateProbe(field *k8sfield.Path, probe *v1.Probe) []metav1.StatusCause {
var causes []metav1.StatusCause
if probe == nil {
return causes
}
numHandlers := 0
if probe.HTTPGet != nil {
numHandlers++
}
if probe.TCPSocket != nil {
numHandlers++
}
if probe.Exec != nil {
numHandlers++
}
if probe.GuestAgentPing != nil {
numHandlers++
}
if numHandlers > 1 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must have exactly one probe type set", field),
Field: field.String(),
})
}
if numHandlers < 1 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: fmt.Sprintf("either %s, %s or %s must be set if a %s is specified",
field.Child("tcpSocket").String(),
field.Child("exec").String(),
field.Child("httpGet").String(),
field,
),
Field: field.String(),
})
}
return causes
}
func appendStatusCauseForProbeNotAllowedWithNoPodNetworkPresent(field *k8sfield.Path, probe *v1.Probe, causes []metav1.StatusCause) []metav1.StatusCause {
if probe == nil {
return causes
}
if probe.HTTPGet != nil {
causes = append(causes, podNetworkRequiredStatusCause(field.Child("httpGet")))
}
if probe.TCPSocket != nil {
causes = append(causes, podNetworkRequiredStatusCause(field.Child("tcpSocket")))
}
return causes
}
func podNetworkRequiredStatusCause(field *k8sfield.Path) metav1.StatusCause {
return metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s is only allowed if the Pod Network is attached", field.String()),
Field: field.String(),
}
}
func isValidEvictionStrategy(evictionStrategy *v1.EvictionStrategy) bool {
return evictionStrategy == nil ||
*evictionStrategy == v1.EvictionStrategyLiveMigrate ||
*evictionStrategy == v1.EvictionStrategyLiveMigrateIfPossible ||
*evictionStrategy == v1.EvictionStrategyNone ||
*evictionStrategy == v1.EvictionStrategyExternal
}
func validateLiveMigration(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) []metav1.StatusCause {
var causes []metav1.StatusCause
evictionStrategy := config.GetConfig().EvictionStrategy
if spec.EvictionStrategy != nil {
evictionStrategy = spec.EvictionStrategy
}
if !isValidEvictionStrategy(evictionStrategy) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s is set with an unrecognized option: %s", field.Child("evictionStrategy").String(), *spec.EvictionStrategy),
Field: field.Child("evictionStrategy").String(),
})
}
return causes
}
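// countConfiguredMDEVRamFBs counts the GPU devices whose vGPU display is
// enabled (the default) and whose ramfb is enabled (also the default when a
// display is configured).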
func countConfiguredMDEVRamFBs(spec *v1.VirtualMachineInstanceSpec) int {
count := 0
for _, device := range spec.Domain.Devices.GPUs {
if device.VirtualGPUOptions != nil &&
device.VirtualGPUOptions.Display != nil &&
(device.VirtualGPUOptions.Display.Enabled == nil || *device.VirtualGPUOptions.Display.Enabled) &&
(device.VirtualGPUOptions.Display.RamFB == nil || (device.VirtualGPUOptions.Display.RamFB.Enabled != nil && *device.VirtualGPUOptions.Display.RamFB.Enabled)) {
count++
}
}
return count
}
func validateMDEVRamFB(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if countConfiguredMDEVRamFBs(spec) > 1 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "configuring multiple displays with ramfb is not valid ",
Field: field.Child("GPUs").String(),
})
}
return causes
}
func validateHostDevicesWithPassthroughEnabled(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.Devices.HostDevices != nil && !config.HostDevicesPassthroughEnabled() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Host Devices feature gate is not enabled in kubevirt-config",
Field: field.Child("HostDevices").String(),
})
}
return causes
}
func validateSoundDevices(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.Devices.Sound == nil {
return causes
}
model := spec.Domain.Devices.Sound.Model
if model != "" && model != "ich9" && model != "ac97" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Sound device type is not supported. Options: 'ich9' or 'ac97'",
Field: field.Child("Sound").String(),
})
}
if spec.Domain.Devices.Sound.Name == "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Sound device requires a name field.",
Field: field.Child("Sound").String(),
})
}
return causes
}
func validateLaunchSecurity(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) []metav1.StatusCause {
var causes []metav1.StatusCause
launchSecurity := spec.Domain.LaunchSecurity
if launchSecurity == nil {
return causes
}
arch := spec.Architecture
switch arch {
case "amd64":
causes = append(causes, webhooks.ValidateLaunchSecurityAmd64(field, spec, config)...)
case "s390x":
causes = append(causes, webhooks.ValidateLaunchSecurityS390x(field, spec, config)...)
default:
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("No launchSecurity support for architecture: %s", arch),
Field: field.Child("architecture").String(),
})
}
return causes
}
func validateBootOrder(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) []metav1.StatusCause {
var causes []metav1.StatusCause
// used to validate uniqueness of boot orders among disks and interfaces
bootOrderMap := make(map[uint]bool)
volumeNameMap := make(map[string]*v1.Volume)
for i, volume := range spec.Volumes {
volumeNameMap[volume.Name] = &spec.Volumes[i]
}
// Validate disks match volumes correctly
for idx, disk := range spec.Domain.Devices.Disks {
var matchingVolume *v1.Volume
matchingVolume, volumeExists := volumeNameMap[disk.Name]
if !volumeExists {
if disk.CDRom == nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf(nameOfTypeNotFoundMessagePattern, field.Child("domain", "devices", "disks").Index(idx).Child("Name").String(), disk.Name),
Field: field.Child("domain", "devices", "disks").Index(idx).Child("name").String(),
})
} else if !config.DeclarativeHotplugVolumesEnabled() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s feature gate not enabled, cannot define an empty CD-ROM disk", featuregate.DeclarativeHotplugVolumesGate),
Field: field.Child("domain", "devices", "disks").Index(idx).Child("name").String(),
})
}
}
// Verify Lun disks are only mapped to network/block devices.
if disk.LUN != nil && volumeExists && matchingVolume.PersistentVolumeClaim == nil && matchingVolume.DataVolume == nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s can only be mapped to a DataVolume or PersistentVolumeClaim volume.", field.Child("domain", "devices", "disks").Index(idx).Child("lun").String()),
Field: field.Child("domain", "devices", "disks").Index(idx).Child("lun").String(),
})
}
// Verify that DownwardMetrics is mapped to disk
if volumeExists && matchingVolume.DownwardMetrics != nil {
if disk.Disk == nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: fmt.Sprintf("DownwardMetrics volume must be mapped to a disk, but disk is not set on %v.", field.Child("domain", "devices", "disks").Index(idx).Child("disk").String()),
Field: field.Child("domain", "devices", "disks").Index(idx).Child("disk").String(),
})
} else if disk.Disk != nil && disk.Disk.Bus != v1.DiskBusVirtio && disk.Disk.Bus != "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("DownwardMetrics volume must be mapped to virtio bus, but %v is set to %v", field.Child("domain", "devices", "disks").Index(idx).Child("disk").Child("bus").String(), disk.Disk.Bus),
Field: field.Child("domain", "devices", "disks").Index(idx).Child("disk").Child("bus").String(),
})
}
}
// verify that there are no duplicate boot orders
if disk.BootOrder != nil {
order := *disk.BootOrder
if bootOrderMap[order] {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Boot order for %s already set for a different device.", field.Child("domain", "devices", "disks").Index(idx).Child("bootOrder").String()),
Field: field.Child("domain", "devices", "disks").Index(idx).Child("bootOrder").String(),
})
}
bootOrderMap[order] = true
}
}
causes = append(causes, validateInterfaceBootOrder(field, spec, bootOrderMap)...)
return causes
}
func validateCPUFeaturePolicies(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.CPU != nil && spec.Domain.CPU.Features != nil {
for idx, feature := range spec.Domain.CPU.Features {
if _, exists := validCPUFeaturePolicies[feature.Policy]; !exists {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: fmt.Sprintf("CPU feature %s uses policy %s that is not supported.", feature.Name, feature.Policy),
Field: field.Child("domain", "cpu", "features").Index(idx).Child("policy").String(),
})
}
}
}
return causes
}
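// validateCPUIsolatorThread rejects IsolateEmulatorThread when DedicatedCPUPlacement is not also set.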
func validateCPUIsolatorThread(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.CPU != nil && spec.Domain.CPU.IsolateEmulatorThread && !spec.Domain.CPU.DedicatedCPUPlacement {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "IsolateEmulatorThread should be only set in combination with DedicatedCPUPlacement",
Field: field.Child("domain", "cpu", "isolateEmulatorThread").String(),
})
}
return causes
}
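// validateCpuPinning groups the CPU and memory checks that apply only when DedicatedCPUPlacement is set.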
func validateCpuPinning(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.CPU != nil && spec.Domain.CPU.DedicatedCPUPlacement {
causes = append(causes, validateMemoryLimitAndRequestProvided(field, spec)...)
causes = append(causes, validateCPURequestIsInteger(field, spec)...)
causes = append(causes, validateCPULimitIsInteger(field, spec)...)
causes = append(causes, validateMemoryRequestsAndLimits(field, spec)...)
causes = append(causes, validateRequestLimitOrCoresProvidedOnDedicatedCPUPlacement(field, spec)...)
causes = append(causes, validateRequestEqualsLimitOnDedicatedCPUPlacement(field, spec)...)
causes = append(causes, validateRequestOrLimitWithCoresProvidedOnDedicatedCPUPlacement(field, spec)...)
causes = append(causes, validateThreadCountOnArchitecture(field, spec, config)...)
causes = append(causes, validateThreadCountOnDedicatedCPUPlacement(field, spec)...)
}
return causes
}
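// validateNUMA ensures guestMappingPassthrough is only used when the NUMA feature gate is enabled, dedicated CPU placement is set and hugepages are requested.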
func validateNUMA(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.CPU != nil && spec.Domain.CPU.NUMA != nil && spec.Domain.CPU.NUMA.GuestMappingPassthrough != nil {
if !config.NUMAEnabled() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("NUMA feature gate is not enabled in kubevirt-config, invalid entry %s",
field.Child("domain", "cpu", "numa", "guestMappingPassthrough").String()),
Field: field.Child("domain", "cpu", "numa", "guestMappingPassthrough").String(),
})
}
if !spec.Domain.CPU.DedicatedCPUPlacement {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must be set to true when NUMA topology strategy is set in %s",
field.Child("domain", "cpu", "dedicatedCpuPlacement").String(),
field.Child("domain", "cpu", "numa", "guestMappingPassthrough").String(),
),
Field: field.Child("domain", "cpu", "numa", "guestMappingPassthrough").String(),
})
}
if spec.Domain.Memory == nil || spec.Domain.Memory.Hugepages == nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must be requested when NUMA topology strategy is set in %s",
field.Child("domain", "memory", "hugepages").String(),
field.Child("domain", "cpu", "numa", "guestMappingPassthrough").String(),
),
Field: field.Child("domain", "cpu", "numa", "guestMappingPassthrough").String(),
})
}
}
return causes
}
func validateThreadCountOnArchitecture(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) []metav1.StatusCause {
var causes []metav1.StatusCause
arch := spec.Architecture
if arch == "" {
arch = config.GetDefaultArchitecture()
}
// Verify CPU thread count requested is 1 for ARM64 VMI architecture.
if spec.Domain.CPU != nil && spec.Domain.CPU.Threads > 1 && virtconfig.IsARM64(arch) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("threads must not be greater than 1 at %v (got %v) when %v is arm64",
field.Child("domain", "cpu", "threads").String(),
spec.Domain.CPU.Threads,
field.Child("architecture").String(),
),
Field: field.Child("architecture").String(),
})
}
return causes
}
func validateThreadCountOnDedicatedCPUPlacement(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.CPU != nil && spec.Domain.CPU.Threads > 2 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Not more than two threads must be provided at %v (got %v) when DedicatedCPUPlacement is true",
field.Child("domain", "cpu", "threads").String(),
spec.Domain.CPU.Threads,
),
Field: field.Child("domain", "cpu", "dedicatedCpuPlacement").String(),
})
}
return causes
}
func validateRequestOrLimitWithCoresProvidedOnDedicatedCPUPlacement(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if (spec.Domain.Resources.Requests.Cpu().Value() > 0 || spec.Domain.Resources.Limits.Cpu().Value() > 0) && hwutil.GetNumberOfVCPUs(spec.Domain.CPU) > 0 &&
spec.Domain.Resources.Requests.Cpu().Value() != hwutil.GetNumberOfVCPUs(spec.Domain.CPU) && spec.Domain.Resources.Limits.Cpu().Value() != hwutil.GetNumberOfVCPUs(spec.Domain.CPU) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s or %s must not be provided at the same time with %s when DedicatedCPUPlacement is true ",
field.Child("domain", "resources", "requests", "cpu").String(),
field.Child("domain", "resources", "limits", "cpu").String(),
field.Child("domain", "cpu", "cores").String(),
),
Field: field.Child("domain", "cpu", "dedicatedCpuPlacement").String(),
})
}
return causes
}
func validateRequestEqualsLimitOnDedicatedCPUPlacement(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.Resources.Requests.Cpu().Value() > 0 && spec.Domain.Resources.Limits.Cpu().Value() > 0 && spec.Domain.Resources.Requests.Cpu().Value() != spec.Domain.Resources.Limits.Cpu().Value() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s or %s must be equal when DedicatedCPUPlacement is true ",
field.Child("domain", "resources", "requests", "cpu").String(),
field.Child("domain", "resources", "limits", "cpu").String(),
),
Field: field.Child("domain", "cpu", "dedicatedCpuPlacement").String(),
})
}
return causes
}
func validateRequestLimitOrCoresProvidedOnDedicatedCPUPlacement(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.Resources.Requests.Cpu().Value() == 0 && spec.Domain.Resources.Limits.Cpu().Value() == 0 && hwutil.GetNumberOfVCPUs(spec.Domain.CPU) == 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("either %s or %s or %s must be provided when DedicatedCPUPlacement is true ",
field.Child("domain", "resources", "requests", "cpu").String(),
field.Child("domain", "resources", "limits", "cpu").String(),
field.Child("domain", "cpu", "cores").String(),
),
Field: field.Child("domain", "cpu", "dedicatedCpuPlacement").String(),
})
}
return causes
}
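// validateStartStrategy only accepts the Paused start strategy, and rejects combining it with a liveness probe.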
func validateStartStrategy(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.StartStrategy == nil {
return causes
}
if *spec.StartStrategy != v1.StartStrategyPaused {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s is set with an unrecognized option: %s", field.Child("startStrategy").String(), *spec.StartStrategy),
Field: field.Child("startStrategy").String(),
})
} else if spec.LivenessProbe != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("either %s or %s should be provided.Pausing VMI with LivenessProbe is not supported",
field.Child("startStrategy").String(),
field.Child("livenessProbe").String(),
),
Field: field.Child("startStrategy").String(),
})
}
return causes
}
func validateMemoryRequestsAndLimits(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.Resources.Requests.Memory().Value() > 0 && spec.Domain.Resources.Limits.Memory().Value() > 0 && spec.Domain.Resources.Requests.Memory().Value() != spec.Domain.Resources.Limits.Memory().Value() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must be equal to %s",
field.Child("domain", "resources", "requests", "memory").String(),
field.Child("domain", "resources", "limits", "memory").String(),
),
Field: field.Child("domain", "resources", "requests", "memory").String(),
})
}
return causes
}
func validateCPULimitIsInteger(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.Resources.Limits.Cpu().Value() > 0 && spec.Domain.Resources.Limits.Cpu().Value()*1000 != spec.Domain.Resources.Limits.Cpu().MilliValue() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "provided resources CPU limits must be an interger",
Field: field.Child("domain", "resources", "limits", "cpu").String(),
})
}
return causes
}
func validateCPURequestIsInteger(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.Resources.Requests.Cpu().Value() > 0 && spec.Domain.Resources.Requests.Cpu().Value()*1000 != spec.Domain.Resources.Requests.Cpu().MilliValue() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "provided resources CPU requests must be an interger",
Field: field.Child("domain", "resources", "requests", "cpu").String(),
})
}
return causes
}
func validateMemoryLimitAndRequestProvided(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.Resources.Limits.Memory().Value() == 0 && spec.Domain.Resources.Requests.Memory().Value() == 0 &&
spec.Domain.Memory.Hugepages == nil && spec.Domain.Memory.Guest.Value() == 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s, %s, %s or %s should be provided",
field.Child("domain", "resources", "requests", "memory").String(),
field.Child("domain", "resources", "limits", "memory").String(),
field.Child("domain", "memory", "hugepages").String(),
field.Child("domain", "memory", "guest").String(),
),
Field: field.Child("domain", "resources", "limits", "memory").String(),
})
}
return causes
}
func validateCpuRequestDoesNotExceedLimit(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.Resources.Limits.Cpu().MilliValue() > 0 &&
spec.Domain.Resources.Requests.Cpu().MilliValue() > spec.Domain.Resources.Limits.Cpu().MilliValue() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s '%s' is greater than %s '%s'", field.Child("domain", "resources", "requests", "cpu").String(),
spec.Domain.Resources.Requests.Cpu(),
field.Child("domain", "resources", "limits", "cpu").String(),
spec.Domain.Resources.Limits.Cpu()),
Field: field.Child("domain", "resources", "requests", "cpu").String(),
})
}
return causes
}
func validateCPULimitNotNegative(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.Resources.Limits.Cpu().MilliValue() < 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf(valueMustBePositiveMessagePattern, field.Child("domain", "resources", "limits", "cpu").String(),
spec.Domain.Resources.Limits.Cpu()),
Field: field.Child("domain", "resources", "limits", "cpu").String(),
})
}
return causes
}
func validateCPURequestNotNegative(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.Resources.Requests.Cpu().MilliValue() < 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf(valueMustBePositiveMessagePattern, field.Child("domain", "resources", "requests", "cpu").String(),
spec.Domain.Resources.Requests.Cpu()),
Field: field.Child("domain", "resources", "requests", "cpu").String(),
})
}
return causes
}
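// validateEmulatedMachine matches the requested machine type against the glob patterns of machine types allowed for the spec's architecture.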
func validateEmulatedMachine(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) []metav1.StatusCause {
var causes []metav1.StatusCause
if machine := spec.Domain.Machine; machine != nil && len(machine.Type) > 0 {
supportedMachines := config.GetEmulatedMachines(spec.Architecture)
var match = false
for _, val := range supportedMachines {
// The patterns are hardcoded, so this should not return an error
if ok, _ := filepath.Match(val, machine.Type); ok {
match = true
break
}
}
if !match {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s is not supported: %s (allowed values: %v)",
field.Child("domain", "machine", "type").String(),
machine.Type,
supportedMachines,
),
Field: field.Child("domain", "machine", "type").String(),
})
}
}
return causes
}
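// validateGuestMemoryLimit rejects a guest memory size larger than the memory limit, unless the cluster uses the LiveUpdate VM rollout strategy, in which case the check is skipped.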
func validateGuestMemoryLimit(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) []metav1.StatusCause {
var causes []metav1.StatusCause
if config.IsVMRolloutStrategyLiveUpdate() {
return causes
}
if spec.Domain.Memory == nil || spec.Domain.Memory.Guest == nil {
return causes
}
limits := spec.Domain.Resources.Limits.Memory().Value()
guest := spec.Domain.Memory.Guest.Value()
if limits < guest && limits != 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s '%s' must be equal to or less than the memory limit %s '%s'",
field.Child("domain", "memory", "guest").String(),
spec.Domain.Memory.Guest,
field.Child("domain", "resources", "limits", "memory").String(),
spec.Domain.Resources.Limits.Memory(),
),
Field: field.Child("domain", "memory", "guest").String(),
})
}
return causes
}
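// validateHugepagesMemoryRequests parses the hugepages page size and checks that the VM memory (taken from requests, guest memory or limits, in that order) is at least one page and a multiple of the page size.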
func validateHugepagesMemoryRequests(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.Memory == nil || spec.Domain.Memory.Hugepages == nil {
return causes
}
hugepagesSize, err := resource.ParseQuantity(spec.Domain.Memory.Hugepages.PageSize)
if err != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s '%s': %s",
field.Child("domain", "memory", "hugepages", "pageSize").String(),
spec.Domain.Memory.Hugepages.PageSize,
resource.ErrFormatWrong,
),
Field: field.Child("domain", "memory", "hugepages", "pageSize").String(),
})
return causes
}
vmMemory := spec.Domain.Resources.Requests.Memory().Value()
if vmMemory == 0 && spec.Domain.Memory != nil {
vmMemory = spec.Domain.Memory.Guest.Value()
}
if vmMemory == 0 {
vmMemory = spec.Domain.Resources.Limits.Memory().Value()
}
if vmMemory != 0 && vmMemory < hugepagesSize.Value() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s '%s' must be equal to or larger than page size %s '%s'",
field.Child("domain", "resources", "requests", "memory").String(),
spec.Domain.Resources.Requests.Memory(),
field.Child("domain", "memory", "hugepages", "pageSize").String(),
spec.Domain.Memory.Hugepages.PageSize,
),
Field: field.Child("domain", "resources", "requests", "memory").String(),
})
} else if vmMemory%hugepagesSize.Value() != 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s '%s' is not a multiple of the page size %s '%s'",
field.Child("domain", "resources", "requests", "memory").String(),
spec.Domain.Resources.Requests.Memory(),
field.Child("domain", "memory", "hugepages", "pageSize").String(),
spec.Domain.Memory.Hugepages.PageSize,
),
Field: field.Child("domain", "resources", "requests", "memory").String(),
})
}
return causes
}
func validateMemoryLimitsNegativeOrNull(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.Resources.Limits.Memory().Value() < 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf(valueMustBePositiveMessagePattern, field.Child("domain", "resources", "limits", "memory").String(),
spec.Domain.Resources.Limits.Memory()),
Field: field.Child("domain", "resources", "limits", "memory").String(),
})
}
if spec.Domain.Resources.Limits.Memory().Value() > 0 &&
spec.Domain.Resources.Requests.Memory().Value() > spec.Domain.Resources.Limits.Memory().Value() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s '%s' is greater than %s '%s'", field.Child("domain", "resources", "requests", "memory").String(),
spec.Domain.Resources.Requests.Memory(),
field.Child("domain", "resources", "limits", "memory").String(),
spec.Domain.Resources.Limits.Memory()),
Field: field.Child("domain", "resources", "requests", "memory").String(),
})
}
return causes
}
func validateMemoryRequestsNegativeOrNull(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.Resources.Requests.Memory().Value() < 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf(valueMustBePositiveMessagePattern, field.Child("domain", "resources", "requests", "memory").String(),
spec.Domain.Resources.Requests.Memory()),
Field: field.Child("domain", "resources", "requests", "memory").String(),
})
} else if spec.Domain.Resources.Requests.Memory().Value() > 0 && spec.Domain.Resources.Requests.Memory().Cmp(resource.MustParse("1M")) < 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s '%s': must be greater than or equal to 1M.", field.Child("domain", "resources", "requests", "memory").String(),
spec.Domain.Resources.Requests.Memory()),
Field: field.Child("domain", "resources", "requests", "memory").String(),
})
}
return causes
}
func validateSubdomainDNSSubdomainRules(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Subdomain == "" {
return causes
}
if errors := validation.IsDNS1123Subdomain(spec.Subdomain); len(errors) != 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s does not conform to the kubernetes DNS_SUBDOMAIN rules : %s",
field.Child("subdomain").String(), strings.Join(errors, ", ")),
Field: field.Child("subdomain").String(),
})
}
return causes
}
func validateHostNameNotConformingToDNSLabelRules(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Hostname == "" {
return causes
}
if errors := validation.IsDNS1123Label(spec.Hostname); len(errors) != 0 {
causes = appendNewStatusCauseForHostNameNotConformingToDNSLabelRules(field, causes, errors)
}
return causes
}
func validateRealtime(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.CPU != nil && spec.Domain.CPU.Realtime != nil {
causes = append(causes, validateCPURealtime(field, spec)...)
causes = append(causes, validateMemoryRealtime(field, spec)...)
}
return causes
}
func validateCPURealtime(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if !spec.Domain.CPU.DedicatedCPUPlacement {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: fmt.Sprintf("%s must be set to true when %s is used",
field.Child("domain", "cpu", "dedicatedCpuPlacement").String(),
field.Child("domain", "cpu", "realtime").String(),
),
Field: field.Child("domain", "cpu", "dedicatedCpuPlacement").String(),
})
}
return causes
}
func validateMemoryRealtime(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.CPU.NUMA == nil || spec.Domain.CPU.NUMA.GuestMappingPassthrough == nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: fmt.Sprintf("%s must be defined when %s is used",
field.Child("domain", "cpu", "numa", "guestMappingPassthrough").String(),
field.Child("domain", "cpu", "realtime").String(),
),
Field: field.Child("domain", "cpu", "numa", "guestMappingPassthrough").String(),
})
}
return causes
}
func appendNewStatusCauseForHostNameNotConformingToDNSLabelRules(field *k8sfield.Path, causes []metav1.StatusCause, errors []string) []metav1.StatusCause {
return append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s does not conform to the kubernetes DNS_LABEL rules : %s",
field.Child("hostname").String(), strings.Join(errors, ", ")),
Field: field.Child("hostname").String(),
})
}
// ValidateVirtualMachineInstanceMandatoryFields should be invoked after all defaults and presets are applied.
// It is only meant to be used for VMI reviews, not when VMIs are templates on other objects.
func ValidateVirtualMachineInstanceMandatoryFields(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
requests := spec.Domain.Resources.Requests.Memory().Value()
if requests != 0 {
return causes
}
if spec.Domain.Memory == nil || spec.Domain.Memory != nil &&
spec.Domain.Memory.Guest == nil && spec.Domain.Memory.Hugepages == nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: fmt.Sprintf("no memory requested, at least one of '%s', '%s' or '%s' must be set",
field.Child("domain", "memory", "guest").String(),
field.Child("domain", "memory", "hugepages", "pageSize").String(),
field.Child("domain", "resources", "requests", "memory").String()),
})
}
return causes
}
func ValidateVirtualMachineInstanceMetadata(field *k8sfield.Path, metadata *metav1.ObjectMeta, config *virtconfig.ClusterConfig, isKubeVirtServiceAccount bool) []metav1.StatusCause {
var causes []metav1.StatusCause
annotations := metadata.Annotations
labels := metadata.Labels
// Validate the presence of kubevirt.io labels. Restricted labels are allowed
// to be created only by known service accounts
if !isKubeVirtServiceAccount {
if len(filterKubevirtLabels(labels)) > 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: "creation of the following reserved kubevirt.io/ labels on a VMI object is prohibited",
Field: field.Child("labels").String(),
})
}
}
// Validate ignition feature gate if set when the corresponding annotation is found
if annotations[v1.IgnitionAnnotation] != "" && !config.IgnitionEnabled() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("ExperimentalIgnitionSupport feature gate is not enabled in kubevirt-config, invalid entry %s",
field.Child("annotations").Child(v1.IgnitionAnnotation).String()),
Field: field.Child("annotations").String(),
})
}
// Validate sidecar feature gate if set when the corresponding annotation is found
if annotations[hooks.HookSidecarListAnnotationName] != "" && !config.SidecarEnabled() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("sidecar feature gate is not enabled in kubevirt-config, invalid entry %s",
field.Child("annotations", hooks.HookSidecarListAnnotationName).String()),
Field: field.Child("annotations").String(),
})
}
return causes
}
// Copied from kubernetes/pkg/apis/core/validation/validation.go
func validatePodDNSConfig(dnsConfig *k8sv1.PodDNSConfig, dnsPolicy *k8sv1.DNSPolicy, field *k8sfield.Path) []metav1.StatusCause {
var causes []metav1.StatusCause
// Validate DNSNone case. Must provide at least one DNS name server.
if dnsPolicy != nil && *dnsPolicy == k8sv1.DNSNone {
if dnsConfig == nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: fmt.Sprintf("must provide `dnsConfig` when `dnsPolicy` is %s", k8sv1.DNSNone),
Field: field.String(),
})
return causes
}
if len(dnsConfig.Nameservers) == 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: fmt.Sprintf("must provide at least one DNS nameserver when `dnsPolicy` is %s", k8sv1.DNSNone),
Field: "nameservers",
})
return causes
}
}
if dnsConfig == nil {
return causes
}
// Validate nameservers.
if len(dnsConfig.Nameservers) > maxDNSNameservers {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("must not have more than %v nameservers: %s", maxDNSNameservers, dnsConfig.Nameservers),
Field: "nameservers",
})
}
for _, ns := range dnsConfig.Nameservers {
if ip := net.ParseIP(ns); ip == nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("must be valid IP address: %s", ns),
Field: "nameservers",
})
}
}
// Validate searches.
if len(dnsConfig.Searches) > maxDNSSearchPaths {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("must not have more than %v search paths", maxDNSSearchPaths),
Field: "searchDomains",
})
}
// Include the space between search paths.
if len(strings.Join(dnsConfig.Searches, " ")) > maxDNSSearchListChars {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("must not have more than %v characters (including spaces) in the search list", maxDNSSearchListChars),
Field: "searchDomains",
})
}
for _, search := range dnsConfig.Searches {
for _, msg := range validation.IsDNS1123Subdomain(search) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%v", msg),
Field: "searchDomains",
})
}
}
// Validate options.
for _, option := range dnsConfig.Options {
if len(option.Name) == 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Option.Name must not be empty"),
Field: "options",
})
}
}
return causes
}
// Copied from kubernetes/pkg/apis/core/validation/validation.go
func validateDNSPolicy(dnsPolicy *k8sv1.DNSPolicy, field *k8sfield.Path) []metav1.StatusCause {
var causes []metav1.StatusCause
switch *dnsPolicy {
case k8sv1.DNSClusterFirstWithHostNet, k8sv1.DNSClusterFirst, k8sv1.DNSDefault, k8sv1.DNSNone, "":
default:
validValues := []string{string(k8sv1.DNSClusterFirstWithHostNet), string(k8sv1.DNSClusterFirst), string(k8sv1.DNSDefault), string(k8sv1.DNSNone), ""}
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: fmt.Sprintf("DNSPolicy: %s is not supported, valid values: %s", *dnsPolicy, validValues),
Field: field.String(),
})
}
return causes
}
func validateBootloader(field *k8sfield.Path, bootloader *v1.Bootloader) []metav1.StatusCause {
var causes []metav1.StatusCause
if bootloader != nil && bootloader.EFI != nil && bootloader.BIOS != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s has both EFI and BIOS configured, but they are mutually exclusive.", field.String()),
Field: field.String(),
})
}
return causes
}
func validateFirmwareACPI(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.Firmware == nil || spec.Domain.Firmware.ACPI == nil {
return causes
}
acpi := spec.Domain.Firmware.ACPI
if acpi.SlicNameRef == "" && acpi.MsdmNameRef == "" {
return append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("ACPI was set but no SLIC nor MSDM volume reference was set"),
Field: field.String(),
})
}
causes = append(causes, validateACPIRef(field, acpi.SlicNameRef, spec.Volumes, "slicNameRef")...)
causes = append(causes, validateACPIRef(field, acpi.MsdmNameRef, spec.Volumes, "msdmNameRef")...)
return causes
}
func validateACPIRef(field *k8sfield.Path, nameRef string, volumes []v1.Volume, fieldName string) []metav1.StatusCause {
if nameRef == "" {
return nil
}
for _, volume := range volumes {
if nameRef != volume.Name {
continue
}
if volume.Secret != nil {
return nil
}
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s refers to Volume of unsupported type.", field.String()),
Field: field.Child(fieldName).String(),
}}
}
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s does not have a matching Volume.", field.String()),
Field: field.Child(fieldName).String(),
}}
}
func validateFirmware(field *k8sfield.Path, firmware *v1.Firmware) []metav1.StatusCause {
var causes []metav1.StatusCause
if firmware != nil {
causes = append(causes, validateBootloader(field.Child("bootloader"), firmware.Bootloader)...)
causes = append(causes, validateKernelBoot(field.Child("kernelBoot"), firmware.KernelBoot)...)
}
return causes
}
func efiBootEnabled(firmware *v1.Firmware) bool {
return firmware != nil && firmware.Bootloader != nil && firmware.Bootloader.EFI != nil
}
func secureBootEnabled(firmware *v1.Firmware) bool {
return efiBootEnabled(firmware) &&
(firmware.Bootloader.EFI.SecureBoot == nil || *firmware.Bootloader.EFI.SecureBoot)
}
func smmFeatureEnabled(features *v1.Features) bool {
return features != nil && features.SMM != nil && (features.SMM.Enabled == nil || *features.SMM.Enabled)
}
func validateDomainSpec(field *k8sfield.Path, spec *v1.DomainSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
causes = append(causes, storageadmitters.ValidateDisks(field.Child("devices").Child("disks"), spec.Devices.Disks)...)
causes = append(causes, validateFirmware(field.Child("firmware"), spec.Firmware)...)
if secureBootEnabled(spec.Firmware) && !smmFeatureEnabled(spec.Features) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s has EFI SecureBoot enabled. SecureBoot requires SMM, which is currently disabled.", field.String()),
Field: field.String(),
})
}
return causes
}
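// validateAccessCredentials checks that each access credential sets exactly one credential type,
// exactly one source and exactly one propagation method, and that the noCloud and configDrive
// propagation methods have a matching cloud-init volume to write into.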
func validateAccessCredentials(field *k8sfield.Path, accessCredentials []v1.AccessCredential, volumes []v1.Volume) []metav1.StatusCause {
var causes []metav1.StatusCause
hasNoCloudVolume := false
for _, volume := range volumes {
if volume.CloudInitNoCloud != nil {
hasNoCloudVolume = true
break
}
}
hasConfigDriveVolume := false
for _, volume := range volumes {
if volume.CloudInitConfigDrive != nil {
hasConfigDriveVolume = true
break
}
}
for idx, accessCred := range accessCredentials {
count := 0
// one access cred type must be selected
if accessCred.SSHPublicKey != nil {
count++
sourceCount := 0
methodCount := 0
if accessCred.SSHPublicKey.Source.Secret != nil {
sourceCount++
}
if accessCred.SSHPublicKey.PropagationMethod.NoCloud != nil {
methodCount++
if !hasNoCloudVolume {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s requires a noCloud volume to exist when the noCloud propagationMethod is in use.", field.Index(idx).String()),
Field: field.Index(idx).Child("sshPublicKey", "propagationMethod").String(),
})
}
}
if accessCred.SSHPublicKey.PropagationMethod.ConfigDrive != nil {
methodCount++
if !hasConfigDriveVolume {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s requires a configDrive volume to exist when the configDrive propagationMethod is in use.", field.Index(idx).String()),
Field: field.Index(idx).Child("sshPublicKey", "propagationMethod").String(),
})
}
}
if accessCred.SSHPublicKey.PropagationMethod.QemuGuestAgent != nil {
if len(accessCred.SSHPublicKey.PropagationMethod.QemuGuestAgent.Users) == 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s requires at least one user to be present in the users list", field.Index(idx).String()),
Field: field.Index(idx).Child("sshPublicKey", "propagationMethod", "qemuGuestAgent", "users").String(),
})
}
methodCount++
}
if sourceCount != 1 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must have exactly one source set", field.Index(idx).String()),
Field: field.Index(idx).Child("sshPublicKey", "source").String(),
})
}
if methodCount != 1 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must have exactly one propagationMethod set", field.Index(idx).String()),
Field: field.Index(idx).Child("sshPublicKey", "propagationMethod").String(),
})
}
}
if accessCred.UserPassword != nil {
count++
sourceCount := 0
methodCount := 0
if accessCred.UserPassword.Source.Secret != nil {
sourceCount++
}
if accessCred.UserPassword.PropagationMethod.QemuGuestAgent != nil {
methodCount++
}
if sourceCount != 1 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must have exactly one source set", field.Index(idx).String()),
Field: field.Index(idx).Child("userPassword", "source").String(),
})
}
if methodCount != 1 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must have exactly one propagationMethod set", field.Index(idx).String()),
Field: field.Index(idx).Child("userPassword", "propagationMethod").String(),
})
}
}
if count != 1 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must have exactly one access credential type set", field.Index(idx).String()),
Field: field.Index(idx).String(),
})
}
}
return causes
}
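// validateVolumes checks that volume names are unique, that each volume has exactly one source set,
// that cloud-init payloads stay within size limits, and that hostDisk, configMap, secret and
// serviceAccount volumes are well formed; serviceAccount, downwardMetrics and memoryDump volumes
// may appear at most once.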
func validateVolumes(field *k8sfield.Path, volumes []v1.Volume, config *virtconfig.ClusterConfig) []metav1.StatusCause {
var causes []metav1.StatusCause
nameMap := make(map[string]int)
// check that we have at most one instance of each of the singleton volume types below
serviceAccountVolumeCount := 0
downwardMetricVolumeCount := 0
memoryDumpVolumeCount := 0
for idx, volume := range volumes {
// verify name is unique
otherIdx, ok := nameMap[volume.Name]
if !ok {
nameMap[volume.Name] = idx
} else {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s and %s must not have the same Name.", field.Index(idx).String(), field.Index(otherIdx).String()),
Field: field.Index(idx).Child("name").String(),
})
}
// Verify exactly one source is set
volumeSourceSetCount := 0
if volume.PersistentVolumeClaim != nil {
volumeSourceSetCount++
}
if volume.Sysprep != nil {
volumeSourceSetCount++
}
if volume.CloudInitNoCloud != nil {
volumeSourceSetCount++
}
if volume.CloudInitConfigDrive != nil {
volumeSourceSetCount++
}
if volume.ContainerDisk != nil {
volumeSourceSetCount++
}
if volume.Ephemeral != nil {
volumeSourceSetCount++
}
if volume.EmptyDisk != nil {
volumeSourceSetCount++
}
if volume.HostDisk != nil {
volumeSourceSetCount++
}
if volume.DataVolume != nil {
if volume.DataVolume.Name == "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: "DataVolume 'name' must be set",
Field: field.Index(idx).Child("name").String(),
})
}
volumeSourceSetCount++
}
if volume.ConfigMap != nil {
volumeSourceSetCount++
}
if volume.Secret != nil {
volumeSourceSetCount++
}
if volume.DownwardAPI != nil {
volumeSourceSetCount++
}
if volume.ServiceAccount != nil {
volumeSourceSetCount++
serviceAccountVolumeCount++
}
if volume.DownwardMetrics != nil {
downwardMetricVolumeCount++
volumeSourceSetCount++
}
if volume.MemoryDump != nil {
memoryDumpVolumeCount++
volumeSourceSetCount++
}
if volumeSourceSetCount != 1 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must have exactly one source type set", field.Index(idx).String()),
Field: field.Index(idx).String(),
})
}
// Verify cloud init data is within size limits
if volume.CloudInitNoCloud != nil || volume.CloudInitConfigDrive != nil {
var userDataSecretRef, networkDataSecretRef *k8sv1.LocalObjectReference
var dataSourceType, userData, userDataBase64, networkData, networkDataBase64 string
if volume.CloudInitNoCloud != nil {
dataSourceType = "cloudInitNoCloud"
userDataSecretRef = volume.CloudInitNoCloud.UserDataSecretRef
userDataBase64 = volume.CloudInitNoCloud.UserDataBase64
userData = volume.CloudInitNoCloud.UserData
networkDataSecretRef = volume.CloudInitNoCloud.NetworkDataSecretRef
networkDataBase64 = volume.CloudInitNoCloud.NetworkDataBase64
networkData = volume.CloudInitNoCloud.NetworkData
} else if volume.CloudInitConfigDrive != nil {
dataSourceType = "cloudInitConfigDrive"
userDataSecretRef = volume.CloudInitConfigDrive.UserDataSecretRef
userDataBase64 = volume.CloudInitConfigDrive.UserDataBase64
userData = volume.CloudInitConfigDrive.UserData
networkDataSecretRef = volume.CloudInitConfigDrive.NetworkDataSecretRef
networkDataBase64 = volume.CloudInitConfigDrive.NetworkDataBase64
networkData = volume.CloudInitConfigDrive.NetworkData
}
userDataLen := 0
userDataSourceCount := 0
networkDataLen := 0
networkDataSourceCount := 0
if userDataSecretRef != nil && userDataSecretRef.Name != "" {
userDataSourceCount++
}
if userDataBase64 != "" {
userDataSourceCount++
userData, err := base64.StdEncoding.DecodeString(userDataBase64)
if err != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s.%s.userDataBase64 is not a valid base64 value.", field.Index(idx).Child(dataSourceType, "userDataBase64").String(), dataSourceType),
Field: field.Index(idx).Child(dataSourceType, "userDataBase64").String(),
})
}
userDataLen = len(userData)
}
if userData != "" {
userDataSourceCount++
userDataLen = len(userData)
}
if userDataSourceCount > 1 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must have only one userdatasource set.", field.Index(idx).Child(dataSourceType).String()),
Field: field.Index(idx).Child(dataSourceType).String(),
})
}
if userDataLen > cloudInitUserMaxLen {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s userdata exceeds %d byte limit. Should use UserDataSecretRef for larger data.", field.Index(idx).Child(dataSourceType).String(), cloudInitUserMaxLen),
Field: field.Index(idx).Child(dataSourceType).String(),
})
}
if networkDataSecretRef != nil && networkDataSecretRef.Name != "" {
networkDataSourceCount++
}
if networkDataBase64 != "" {
networkDataSourceCount++
networkData, err := base64.StdEncoding.DecodeString(networkDataBase64)
if err != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s.%s.networkDataBase64 is not a valid base64 value.", field.Index(idx).Child(dataSourceType, "networkDataBase64").String(), dataSourceType),
Field: field.Index(idx).Child(dataSourceType, "networkDataBase64").String(),
})
}
networkDataLen = len(networkData)
}
if networkData != "" {
networkDataSourceCount++
networkDataLen = len(networkData)
}
if networkDataSourceCount > 1 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must have only one networkdata source set.", field.Index(idx).Child(dataSourceType).String()),
Field: field.Index(idx).Child(dataSourceType).String(),
})
}
if networkDataLen > cloudInitNetworkMaxLen {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s networkdata exceeds %d byte limit. Should use NetworkDataSecretRef for larger data.", field.Index(idx).Child(dataSourceType).String(), cloudInitNetworkMaxLen),
Field: field.Index(idx).Child(dataSourceType).String(),
})
}
if userDataSourceCount == 0 && networkDataSourceCount == 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must have at least one userdatasource or one networkdatasource set.", field.Index(idx).Child(dataSourceType).String()),
Field: field.Index(idx).Child(dataSourceType).String(),
})
}
}
if volume.DownwardMetrics != nil && !config.DownwardMetricsEnabled() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "downwardMetrics disks are not allowed: DownwardMetrics feature gate is not enabled.",
Field: field.Index(idx).String(),
})
}
// validate HostDisk data
if hostDisk := volume.HostDisk; hostDisk != nil {
if !config.HostDiskEnabled() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "HostDisk feature gate is not enabled",
Field: field.Index(idx).String(),
})
}
if hostDisk.Path == "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotFound,
Message: fmt.Sprintf("%s is required for hostDisk volume", field.Index(idx).Child("hostDisk", "path").String()),
Field: field.Index(idx).Child("hostDisk", "path").String(),
})
}
if hostDisk.Type != v1.HostDiskExists && hostDisk.Type != v1.HostDiskExistsOrCreate {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s has invalid value '%s', allowed are '%s' or '%s'", field.Index(idx).Child("hostDisk", "type").String(), hostDisk.Type, v1.HostDiskExists, v1.HostDiskExistsOrCreate),
Field: field.Index(idx).Child("hostDisk", "type").String(),
})
}
// If disk.img already exists and the user indicates that by specifying type 'Disk', it is pointless to set a capacity
if hostDisk.Type == v1.HostDiskExists && !hostDisk.Capacity.IsZero() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s is allowed to pass only with %s equal to '%s'", field.Index(idx).Child("hostDisk", "capacity").String(), field.Index(idx).Child("hostDisk", "type").String(), v1.HostDiskExistsOrCreate),
Field: field.Index(idx).Child("hostDisk", "capacity").String(),
})
}
}
if volume.ConfigMap != nil {
if volume.ConfigMap.LocalObjectReference.Name == "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf(requiredFieldFmt, field.Index(idx).Child("configMap", "name").String()),
Field: field.Index(idx).Child("configMap", "name").String(),
})
}
}
if volume.Secret != nil {
if volume.Secret.SecretName == "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf(requiredFieldFmt, field.Index(idx).Child("secret", "secretName").String()),
Field: field.Index(idx).Child("secret", "secretName").String(),
})
}
}
if volume.ServiceAccount != nil {
if volume.ServiceAccount.ServiceAccountName == "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf(requiredFieldFmt, field.Index(idx).Child("serviceAccount", "serviceAccountName").String()),
Field: field.Index(idx).Child("serviceAccount", "serviceAccountName").String(),
})
}
}
}
if serviceAccountVolumeCount > 1 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must have max one serviceAccount volume set", field.String()),
Field: field.String(),
})
}
if downwardMetricVolumeCount > 1 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must have max one downwardMetric volume set", field.String()),
Field: field.String(),
})
}
if memoryDumpVolumeCount > 1 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s must have max one memory dump volume set", field.String()),
Field: field.String(),
})
}
return causes
}
// Rejects kernel boot defined with initrd/kernel path but without an image
func validateKernelBoot(field *k8sfield.Path, kernelBoot *v1.KernelBoot) []metav1.StatusCause {
var causes []metav1.StatusCause
if kernelBoot == nil {
return causes
}
if kernelBoot.Container == nil {
if kernelBoot.KernelArgs != "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "kernel arguments cannot be provided without an external kernel",
Field: field.Child("kernelArgs").String(),
})
}
return causes
}
container := kernelBoot.Container
containerField := field.Child("container")
if container.Image == "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: fmt.Sprintf("%s must be defined with an image", containerField),
Field: containerField.Child("image").String(),
})
}
if container.InitrdPath == "" && container.KernelPath == "" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: fmt.Sprintf("%s must be defined with at least one of the following: kernelPath, initrdPath", containerField),
Field: containerField.String(),
})
}
if container.KernelPath != "" {
causes = append(causes, storageadmitters.ValidatePath(containerField.Child("kernelPath"), container.KernelPath)...)
}
if container.InitrdPath != "" {
causes = append(causes, storageadmitters.ValidatePath(containerField.Child("initrdPath"), container.InitrdPath)...)
}
return causes
}
// validateSpecAffinity validates spec.affinity.
// Instead of bringing in the whole kubernetes lib, we copy the validation from kubernetes/pkg/apis/core/validation/validation.go
func validateSpecAffinity(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Affinity == nil {
return causes
}
errorList := validateAffinity(spec.Affinity, field)
// convert errorList to []metav1.StatusCause
for _, validationErr := range errorList {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: validationErr.Error(),
Field: validationErr.Field,
})
}
return causes
}
// validateSpecTopologySpreadConstraints validates spec.topologySpreadConstraints.
// Instead of bringing in the whole kubernetes lib, we copy the validation from kubernetes/pkg/apis/core/validation/validation.go
func validateSpecTopologySpreadConstraints(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.TopologySpreadConstraints == nil {
return causes
}
errorList := validateTopologySpreadConstraints(spec.TopologySpreadConstraints, field.Child("topologySpreadConstraints"))
// convert errorList to []metav1.StatusCause
for _, validationErr := range errorList {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: validationErr.Error(),
Field: validationErr.Field,
})
}
return causes
}
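// validateVSOCK rejects AutoattachVSOCK when the VSOCK feature gate is not enabled.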
func validateVSOCK(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.Devices.AutoattachVSOCK == nil || !*spec.Domain.Devices.AutoattachVSOCK {
return causes
}
if !config.VSOCKEnabled() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s feature gate is not enabled in kubevirt-config", featuregate.VSOCKGate),
Field: field.Child("domain", "devices", "autoattachVSOCK").String(),
})
}
return causes
}
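// validatePersistentReservation rejects LUN disks using persistent reservation when the PersistentReservation feature gate is not enabled.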
func validatePersistentReservation(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) []metav1.StatusCause {
var causes []metav1.StatusCause
if !reservation.HasVMISpecPersistentReservation(spec) {
return causes
}
if !config.PersistentReservationEnabled() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("%s feature gate is not enabled in kubevirt-config", featuregate.PersistentReservation),
Field: field.Child("domain", "devices", "disks", "luns", "reservation").String(),
})
}
return causes
}
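// validateCPUHotplug checks that the number of sockets does not exceed maxSockets when a maximum is set.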
func validateCPUHotplug(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.CPU != nil && spec.Domain.CPU.MaxSockets != 0 {
if spec.Domain.CPU.Sockets > spec.Domain.CPU.MaxSockets {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Number of sockets in CPU topology is greater than the maximum sockets allowed"),
Field: field.Child("domain", "cpu", "sockets").String(),
})
}
}
return causes
}
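// validateVideoConfig rejects an explicit video device when the VideoConfig feature gate is disabled or when autoattachGraphicsDevice is explicitly set to false.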
func validateVideoConfig(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain.Devices.Video == nil {
return causes
}
if !config.VideoConfigEnabled() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Video configuration is specified but the %s feature gate is not enabled", featuregate.VideoConfig),
Field: field.Child("video").String(),
})
return causes
}
if spec.Domain.Devices.AutoattachGraphicsDevice != nil && !*spec.Domain.Devices.AutoattachGraphicsDevice {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Video configuration is not allowed when autoattachGraphicsDevice is set to false",
Field: field.Child("video").String(),
})
}
return causes
}
func validatePanicDeviceModel(field *k8sfield.Path, model *v1.PanicDeviceModel) *metav1.StatusCause {
if model == nil {
return nil
}
if !slices.Contains(validPanicDeviceModels, *model) {
return &metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf(invalidPanicDeviceModelErrFmt, *model),
Field: field.String(),
}
}
return nil
}
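// validatePanicDevices checks that panic devices are only used with the PanicDevices feature gate, that the (defaulted) architecture is not s390x, and that every device model is valid.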
func validatePanicDevices(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) []metav1.StatusCause {
var causes []metav1.StatusCause
if len(spec.Domain.Devices.PanicDevices) == 0 {
return causes
}
if spec.Domain.Devices.PanicDevices != nil && !config.PanicDevicesEnabled() {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Panic Devices feature gate is not enabled in kubevirt-config",
Field: field.Child("domain", "devices", "panicDevices").String(),
})
return causes
}
arch := spec.Architecture
if arch == "" {
arch = config.GetDefaultArchitecture()
}
if arch == "s390x" {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("custom panic devices are not supported on %s architecture", arch),
Field: field.Child("domain", "devices", "panicDevices").String(),
})
}
for idx, panicDevice := range spec.Domain.Devices.PanicDevices {
if cause := validatePanicDeviceModel(field.Child("domain", "devices", "panicDevices").Index(idx).Child("model"), panicDevice.Model); cause != nil {
causes = append(causes, *cause)
}
}
return causes
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"context"
"encoding/json"
"fmt"
admissionv1 "k8s.io/api/admission/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
storageadmitters "kubevirt.io/kubevirt/pkg/storage/admitters"
webhookutils "kubevirt.io/kubevirt/pkg/util/webhooks"
"kubevirt.io/kubevirt/pkg/virt-api/webhooks"
)
type VMIPresetAdmitter struct {
}
func (admitter *VMIPresetAdmitter) Admit(_ context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
if !webhookutils.ValidateRequestResource(ar.Request.Resource, webhooks.VirtualMachineInstancePresetGroupVersionResource.Group, webhooks.VirtualMachineInstancePresetGroupVersionResource.Resource) {
err := fmt.Errorf("expect resource to be '%s'", webhooks.VirtualMachineInstancePresetGroupVersionResource.Resource)
return webhookutils.ToAdmissionResponseError(err)
}
if resp := webhookutils.ValidateSchema(v1.VirtualMachineInstancePresetGroupVersionKind, ar.Request.Object.Raw); resp != nil {
return resp
}
raw := ar.Request.Object.Raw
vmipreset := v1.VirtualMachineInstancePreset{}
err := json.Unmarshal(raw, &vmipreset)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
causes := ValidateVMIPresetSpec(k8sfield.NewPath("spec"), &vmipreset.Spec)
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
reviewResponse := admissionv1.AdmissionResponse{}
reviewResponse.Allowed = true
return &reviewResponse
}
func ValidateVMIPresetSpec(field *k8sfield.Path, spec *v1.VirtualMachineInstancePresetSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Domain == nil {
return append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: fmt.Sprintf("missing domain."),
Field: field.Child("domain").String(),
})
}
causes = append(causes, storageadmitters.ValidateDisks(field.Child("domain").Child("devices").Child("disks"), spec.Domain.Devices.Disks)...)
causes = append(causes, validateFirmware(field.Child("domain").Child("firmware"), spec.Domain.Firmware)...)
return causes
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"context"
"strings"
admissionv1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "kubevirt.io/api/core/v1"
storageadmitters "kubevirt.io/kubevirt/pkg/storage/admitters"
webhookutils "kubevirt.io/kubevirt/pkg/util/webhooks"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/components"
)
const nodeNameExtraInfo = "authentication.kubernetes.io/node-name"
type VMIUpdateAdmitter struct {
clusterConfig *virtconfig.ClusterConfig
kubeVirtServiceAccounts map[string]struct{}
}
func NewVMIUpdateAdmitter(config *virtconfig.ClusterConfig, kubeVirtServiceAccounts map[string]struct{}) *VMIUpdateAdmitter {
return &VMIUpdateAdmitter{
clusterConfig: config,
kubeVirtServiceAccounts: kubeVirtServiceAccounts,
}
}
func (admitter *VMIUpdateAdmitter) Admit(_ context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
if resp := webhookutils.ValidateSchema(v1.VirtualMachineInstanceGroupVersionKind, ar.Request.Object.Raw); resp != nil {
return resp
}
// Get new VMI from admission response
newVMI, oldVMI, err := webhookutils.GetVMIFromAdmissionReview(ar)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
if admitter.clusterConfig.NodeRestrictionEnabled() && hasRequestOriginatedFromVirtHandler(ar.Request.UserInfo.Username, admitter.kubeVirtServiceAccounts) {
values, exist := ar.Request.UserInfo.Extra[nodeNameExtraInfo]
if exist && len(values) > 0 {
nodeName := values[0]
sourceNode := oldVMI.Status.NodeName
targetNode := ""
if oldVMI.Status.MigrationState != nil {
targetNode = oldVMI.Status.MigrationState.TargetNode
}
// Check that source or target is making this request
if nodeName != sourceNode && (targetNode == "" || nodeName != targetNode) {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Node restriction, virt-handler is only allowed to modify VMIs it owns",
},
})
}
// Check that handler is not setting target
if targetNode == "" && newVMI.Status.MigrationState != nil && newVMI.Status.MigrationState.TargetNode != targetNode {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Node restriction, virt-handler is not allowed to set target node",
},
})
}
} else {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Node restriction failed, virt-handler service account is missing node name",
},
})
}
}
// Reject VMI update if VMI spec changed
_, isKubeVirtServiceAccount := admitter.kubeVirtServiceAccounts[ar.Request.UserInfo.Username]
if !equality.Semantic.DeepEqual(newVMI.Spec, oldVMI.Spec) {
// Only allow the KubeVirt SA to modify the VMI spec, since that means it went through the sub resource.
if isKubeVirtServiceAccount {
hotplugResponse := admitHotplug(oldVMI, newVMI, admitter.clusterConfig)
if hotplugResponse != nil {
return hotplugResponse
}
} else {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: "update of VMI object is restricted",
},
})
}
}
if !isKubeVirtServiceAccount {
if reviewResponse := admitVMILabelsUpdate(newVMI, oldVMI); reviewResponse != nil {
return reviewResponse
}
}
return &admissionv1.AdmissionResponse{
Allowed: true,
Warnings: warnDeprecatedAPIs(&newVMI.Spec, admitter.clusterConfig),
}
}
func admitVMILabelsUpdate(
newVMI *v1.VirtualMachineInstance,
oldVMI *v1.VirtualMachineInstance,
) *admissionv1.AdmissionResponse {
oldLabels := filterKubevirtLabels(oldVMI.ObjectMeta.Labels)
newLabels := filterKubevirtLabels(newVMI.ObjectMeta.Labels)
if !equality.Semantic.DeepEqual(oldLabels, newLabels) {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: "modification of the following reserved kubevirt.io/ labels on a VMI object is prohibited",
},
})
}
return nil
}
func filterKubevirtLabels(labels map[string]string) map[string]string {
m := make(map[string]string)
if len(labels) == 0 {
// Return an empty, non-nil map so DeepEqual comparisons behave consistently
return m
}
for label, value := range labels {
if _, ok := restrictedVmiLabels[label]; ok {
m[label] = value
}
}
return m
}
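// Usage sketch (hypothetical helper, illustration only): because
// filterKubevirtLabels keeps only keys from restrictedVmiLabels, two label
// maps that differ solely in user-defined labels compare as equal here.
func exampleOnlyUserLabelsChanged(oldLabels, newLabels map[string]string) bool {
	return equality.Semantic.DeepEqual(filterKubevirtLabels(oldLabels), filterKubevirtLabels(newLabels))
}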
func admitHotplug(
oldVMI, newVMI *v1.VirtualMachineInstance,
clusterConfig *virtconfig.ClusterConfig,
) *admissionv1.AdmissionResponse {
if response := admitHotplugCPU(oldVMI.Spec.Domain.CPU, newVMI.Spec.Domain.CPU); response != nil {
return response
}
if response := admitHotplugMemory(oldVMI.Spec.Domain.Memory, newVMI.Spec.Domain.Memory); response != nil {
return response
}
if response := storageadmitters.AdmitUtilityVolumes(&newVMI.Spec, &oldVMI.Spec, oldVMI.Status.VolumeStatus, clusterConfig); response != nil {
return response
}
return storageadmitters.AdmitHotplugStorage(
newVMI.Spec.Volumes,
oldVMI.Spec.Volumes,
newVMI.Spec.Domain.Devices.Disks,
oldVMI.Spec.Domain.Devices.Disks,
oldVMI.Status.VolumeStatus,
newVMI,
clusterConfig)
}
func admitHotplugCPU(oldCPUTopology, newCPUTopology *v1.CPU) *admissionv1.AdmissionResponse {
if oldCPUTopology.MaxSockets != newCPUTopology.MaxSockets {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "CPU topology maxSockets changed",
},
})
}
return nil
}
func admitHotplugMemory(oldMemory, newMemory *v1.Memory) *admissionv1.AdmissionResponse {
if oldMemory == nil ||
oldMemory.MaxGuest == nil ||
newMemory == nil ||
newMemory.MaxGuest == nil {
return nil
}
if !oldMemory.MaxGuest.Equal(*newMemory.MaxGuest) {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "Memory maxGuest changed",
},
})
}
return nil
}
func hasRequestOriginatedFromVirtHandler(requestUsername string, kubeVirtServiceAccounts map[string]struct{}) bool {
if _, isKubeVirtServiceAccount := kubeVirtServiceAccounts[requestUsername]; isKubeVirtServiceAccount {
return strings.HasSuffix(requestUsername, components.HandlerServiceAccountName)
}
return false
}
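// Illustrative sketch of the node-name extraction performed by Admit above:
// the API server attaches the requesting node's name under the
// "authentication.kubernetes.io/node-name" extra key when the Kubernetes
// ServiceAccountTokenPodNodeInfo feature is active. An empty result is what
// Admit rejects. Hypothetical helper, not part of the webhook wiring.
func exampleExtractHandlerNodeName(ar *admissionv1.AdmissionReview) string {
	values, exist := ar.Request.UserInfo.Extra[nodeNameExtraInfo]
	if !exist || len(values) == 0 {
		return ""
	}
	return values[0]
}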
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"context"
"encoding/json"
"fmt"
admissionv1 "k8s.io/api/admission/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
v1 "kubevirt.io/api/core/v1"
webhookutils "kubevirt.io/kubevirt/pkg/util/webhooks"
"kubevirt.io/kubevirt/pkg/virt-api/webhooks"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/pkg/virt-config/featuregate"
)
type VMIRSAdmitter struct {
ClusterConfig *virtconfig.ClusterConfig
}
func (admitter *VMIRSAdmitter) Admit(_ context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
if !webhookutils.ValidateRequestResource(ar.Request.Resource, webhooks.VirtualMachineInstanceReplicaSetGroupVersionResource.Group, webhooks.VirtualMachineInstanceReplicaSetGroupVersionResource.Resource) {
err := fmt.Errorf("expect resource to be '%s'", webhooks.VirtualMachineInstanceReplicaSetGroupVersionResource.Resource)
return webhookutils.ToAdmissionResponseError(err)
}
if resp := webhookutils.ValidateSchema(v1.VirtualMachineInstanceReplicaSetGroupVersionKind, ar.Request.Object.Raw); resp != nil {
return resp
}
raw := ar.Request.Object.Raw
vmirs := v1.VirtualMachineInstanceReplicaSet{}
err := json.Unmarshal(raw, &vmirs)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
causes := ValidateVMIRSSpec(k8sfield.NewPath("spec"), &vmirs.Spec, admitter.ClusterConfig)
if ar.Request.Operation == admissionv1.Create {
clusterCfg := admitter.ClusterConfig.GetConfig()
if devCfg := clusterCfg.DeveloperConfiguration; devCfg != nil {
causes = append(causes, featuregate.ValidateFeatureGates(devCfg.FeatureGates, &vmirs.Spec.Template.Spec)...)
}
}
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
return &admissionv1.AdmissionResponse{
Allowed: true,
Warnings: warnDeprecatedAPIs(&vmirs.Spec.Template.Spec, admitter.ClusterConfig),
}
}
func ValidateVMIRSSpec(field *k8sfield.Path, spec *v1.VirtualMachineInstanceReplicaSetSpec, config *virtconfig.ClusterConfig) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Template == nil {
return append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: fmt.Sprintf("missing virtual machine template."),
Field: field.Child("template").String(),
})
}
causes = append(causes, ValidateVirtualMachineInstanceSpec(field.Child("template", "spec"), &spec.Template.Spec, config)...)
selector, err := metav1.LabelSelectorAsSelector(spec.Selector)
if err != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: err.Error(),
Field: field.Child("selector").String(),
})
} else if !selector.Matches(labels.Set(spec.Template.ObjectMeta.Labels)) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("selector does not match labels."),
Field: field.Child("selector").String(),
})
}
return causes
}
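// Sketch of the selector check in isolation (hypothetical helper): the API
// selector is converted with metav1.LabelSelectorAsSelector and then matched
// against the template's labels, mirroring the logic in ValidateVMIRSSpec.
func exampleSelectorMatchesTemplate(selector *metav1.LabelSelector, templateLabels map[string]string) (bool, error) {
	s, err := metav1.LabelSelectorAsSelector(selector)
	if err != nil {
		return false, err
	}
	return s.Matches(labels.Set(templateLabels)), nil
}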
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
admissionv1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
poolv1 "kubevirt.io/api/pool/v1beta1"
webhookutils "kubevirt.io/kubevirt/pkg/util/webhooks"
"kubevirt.io/kubevirt/pkg/virt-api/webhooks"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/pkg/virt-config/featuregate"
)
type VMPoolAdmitter struct {
ClusterConfig *virtconfig.ClusterConfig
KubeVirtServiceAccounts map[string]struct{}
}
func (admitter *VMPoolAdmitter) Admit(_ context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
if ar.Request == nil {
err := fmt.Errorf("Empty request for virtual machine pool validation")
return webhookutils.ToAdmissionResponseError(err)
} else if ar.Request.Resource.Resource != webhooks.VirtualMachinePoolGroupVersionResource.Resource {
err := fmt.Errorf("expect resource [%s], but got [%s]", ar.Request.Resource.Resource, webhooks.VirtualMachinePoolGroupVersionResource.Resource)
return webhookutils.ToAdmissionResponseError(err)
} else if ar.Request.Resource.Group != webhooks.VirtualMachinePoolGroupVersionResource.Group {
err := fmt.Errorf("expect resource group [%s], but got [%s]", ar.Request.Resource.Group, webhooks.VirtualMachinePoolGroupVersionResource.Group)
return webhookutils.ToAdmissionResponseError(err)
} else if ar.Request.Resource.Version != webhooks.VirtualMachinePoolGroupVersionResource.Version {
err := fmt.Errorf("expect resource version [%s], but got [%s]", ar.Request.Resource.Version, webhooks.VirtualMachinePoolGroupVersionResource.Version)
return webhookutils.ToAdmissionResponseError(err)
}
gvk := schema.GroupVersionKind{
Group: webhooks.VirtualMachinePoolGroupVersionResource.Group,
Version: webhooks.VirtualMachinePoolGroupVersionResource.Version,
Kind: poolv1.VirtualMachinePoolKind,
}
if resp := webhookutils.ValidateSchema(gvk, ar.Request.Object.Raw); resp != nil {
return resp
}
raw := ar.Request.Object.Raw
pool := poolv1.VirtualMachinePool{}
err := json.Unmarshal(raw, &pool)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
_, isKubeVirtServiceAccount := admitter.KubeVirtServiceAccounts[ar.Request.UserInfo.Username]
causes := ValidateVMPoolSpec(ar, k8sfield.NewPath("spec"), &pool, admitter.ClusterConfig, isKubeVirtServiceAccount)
if ar.Request.Operation == admissionv1.Create {
clusterCfg := admitter.ClusterConfig.GetConfig()
if devCfg := clusterCfg.DeveloperConfiguration; devCfg != nil {
causes = append(
causes,
featuregate.ValidateFeatureGates(devCfg.FeatureGates, &pool.Spec.VirtualMachineTemplate.Spec.Template.Spec)...,
)
}
}
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
return &admissionv1.AdmissionResponse{
Allowed: true,
Warnings: warnDeprecatedAPIs(&pool.Spec.VirtualMachineTemplate.Spec.Template.Spec, admitter.ClusterConfig),
}
}
func ValidateVMPoolSpec(ar *admissionv1.AdmissionReview, field *k8sfield.Path, pool *poolv1.VirtualMachinePool, config *virtconfig.ClusterConfig, isKubeVirtServiceAccount bool) []metav1.StatusCause {
var causes []metav1.StatusCause
spec := &pool.Spec
if spec.VirtualMachineTemplate == nil {
return append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: "missing virtual machine template.",
Field: field.Child("template").String(),
})
}
causes = append(causes, ValidateVirtualMachineSpec(field.Child("virtualMachineTemplate", "spec"), &spec.VirtualMachineTemplate.Spec, config, isKubeVirtServiceAccount)...)
selector, err := metav1.LabelSelectorAsSelector(spec.Selector)
if err != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: err.Error(),
Field: field.Child("selector").String(),
})
} else if !selector.Matches(labels.Set(spec.VirtualMachineTemplate.ObjectMeta.Labels)) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "selector does not match labels.",
Field: field.Child("selector").String(),
})
}
if spec.MaxUnavailable != nil {
if spec.MaxUnavailable.Type == intstr.String {
if !strings.HasSuffix(spec.MaxUnavailable.StrVal, "%") {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "maxUnavailable percentage must end with %",
Field: field.Child("maxUnavailable").String(),
})
} else {
percentage := strings.TrimSuffix(spec.MaxUnavailable.StrVal, "%")
if val, err := strconv.Atoi(percentage); err != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("maxUnavailable percentage value %q is invalid: %v", percentage, err),
Field: field.Child("maxUnavailable").String(),
})
} else if val <= 0 || val > 100 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("maxUnavailable percentage value %d must be between 1 and 100", val),
Field: field.Child("maxUnavailable").String(),
})
}
}
} else if spec.MaxUnavailable.Type == intstr.Int {
if spec.MaxUnavailable.IntVal <= 0 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "maxUnavailable must be greater than 0",
Field: field.Child("maxUnavailable").String(),
})
}
}
}
if spec.UpdateStrategy != nil {
causes = append(causes, validateUpdateStrategyMutualExclusivity(field, spec.UpdateStrategy)...)
}
if spec.ScaleInStrategy != nil {
causes = append(causes, validateScaleInStrategyMutualExclusivity(field, spec.ScaleInStrategy)...)
}
if ar.Request.Operation == admissionv1.Update {
oldPool := &poolv1.VirtualMachinePool{}
if err := json.Unmarshal(ar.Request.OldObject.Raw, oldPool); err != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeUnexpectedServerResponse,
Message: "Could not fetch old vmpool",
})
}
if !equality.Semantic.DeepEqual(pool.Spec.Selector, oldPool.Spec.Selector) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "selector is immutable after creation.",
Field: field.Child("selector").String(),
})
}
}
return causes
}
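// Illustrative parse of a maxUnavailable value, mirroring the validation in
// ValidateVMPoolSpec (assumption: string values use the "N%" form). This is a
// hypothetical helper for demonstration, not used by the admitter.
func exampleMaxUnavailableValue(v intstr.IntOrString) (int, error) {
	if v.Type == intstr.Int {
		return int(v.IntVal), nil
	}
	if !strings.HasSuffix(v.StrVal, "%") {
		return 0, fmt.Errorf("maxUnavailable percentage must end with %%")
	}
	return strconv.Atoi(strings.TrimSuffix(v.StrVal, "%"))
}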
func validateUpdateStrategyMutualExclusivity(field *k8sfield.Path, strategy *poolv1.VirtualMachinePoolUpdateStrategy) []metav1.StatusCause {
mutualExclusivity := map[string]bool{
"unmanaged": strategy.Unmanaged != nil,
"opportunistic": strategy.Opportunistic != nil,
"proactive": strategy.Proactive != nil,
}
return validateMutualExclusivity(field.Child("updateStrategy"), mutualExclusivity)
}
func validateScaleInStrategyMutualExclusivity(field *k8sfield.Path, strategy *poolv1.VirtualMachinePoolScaleInStrategy) []metav1.StatusCause {
mutualExclusivity := map[string]bool{
"unmanaged": strategy.Unmanaged != nil,
"opportunistic": strategy.Opportunistic != nil,
"proactive": strategy.Proactive != nil,
}
return validateMutualExclusivity(field.Child("scaleInStrategy"), mutualExclusivity)
}
func validateMutualExclusivity(field *k8sfield.Path, strategies map[string]bool) []metav1.StatusCause {
var configured []string
for name, isSet := range strategies {
if isSet {
configured = append(configured, name)
}
}
if len(configured) > 1 {
return []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("only one strategy can be configured at a time: but found %s strategies", strings.Join(configured, ", ")),
Field: field.String(),
},
}
}
return nil
}
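// Usage sketch (illustration only): configuring two strategies at once yields
// a single FieldValueInvalid cause naming both configured strategies.
func exampleMutualExclusivityViolation() []metav1.StatusCause {
	return validateMutualExclusivity(k8sfield.NewPath("spec", "updateStrategy"), map[string]bool{
		"unmanaged":     false,
		"opportunistic": true,
		"proactive":     true,
	})
}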
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package admitters
import (
"context"
"encoding/json"
"fmt"
admissionv1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/client-go/tools/cache"
v1 "kubevirt.io/api/core/v1"
instancetypev1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/kubevirt/pkg/controller"
"kubevirt.io/kubevirt/pkg/defaults"
"kubevirt.io/kubevirt/pkg/instancetype/conflict"
instancetypeWebhooks "kubevirt.io/kubevirt/pkg/instancetype/webhooks/vm"
"kubevirt.io/kubevirt/pkg/liveupdate/memory"
metrics "kubevirt.io/kubevirt/pkg/monitoring/metrics/virt-api"
netadmitter "kubevirt.io/kubevirt/pkg/network/admitter"
storageadmitters "kubevirt.io/kubevirt/pkg/storage/admitters"
migrationutil "kubevirt.io/kubevirt/pkg/util/migrations"
webhookutils "kubevirt.io/kubevirt/pkg/util/webhooks"
"kubevirt.io/kubevirt/pkg/virt-api/webhooks"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/pkg/virt-config/featuregate"
)
var validRunStrategies = []v1.VirtualMachineRunStrategy{v1.RunStrategyHalted, v1.RunStrategyManual, v1.RunStrategyAlways, v1.RunStrategyRerunOnFailure, v1.RunStrategyOnce}
type instancetypeVMsAdmitter interface {
ApplyToVM(vm *v1.VirtualMachine) (
*instancetypev1beta1.VirtualMachineInstancetypeSpec,
*instancetypev1beta1.VirtualMachinePreferenceSpec,
[]metav1.StatusCause,
)
Check(*instancetypev1beta1.VirtualMachineInstancetypeSpec,
*instancetypev1beta1.VirtualMachinePreferenceSpec,
*v1.VirtualMachineInstanceSpec,
) (conflict.Conflicts, error)
}
type VMsAdmitter struct {
VirtClient kubecli.KubevirtClient
DataSourceInformer cache.SharedIndexInformer
NamespaceInformer cache.SharedIndexInformer
InstancetypeAdmitter instancetypeVMsAdmitter
ClusterConfig *virtconfig.ClusterConfig
KubeVirtServiceAccounts map[string]struct{}
}
func NewVMsAdmitter(clusterConfig *virtconfig.ClusterConfig, client kubecli.KubevirtClient, informers *webhooks.Informers, kubeVirtServiceAccounts map[string]struct{}) *VMsAdmitter {
return &VMsAdmitter{
VirtClient: client,
DataSourceInformer: informers.DataSourceInformer,
NamespaceInformer: informers.NamespaceInformer,
InstancetypeAdmitter: instancetypeWebhooks.NewAdmitter(client),
ClusterConfig: clusterConfig,
KubeVirtServiceAccounts: kubeVirtServiceAccounts,
}
}
func (admitter *VMsAdmitter) Admit(ctx context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
if !webhookutils.ValidateRequestResource(ar.Request.Resource, webhooks.VirtualMachineGroupVersionResource.Group, webhooks.VirtualMachineGroupVersionResource.Resource) {
err := fmt.Errorf("expect resource to be '%s'", webhooks.VirtualMachineGroupVersionResource.Resource)
return webhookutils.ToAdmissionResponseError(err)
}
if resp := webhookutils.ValidateSchema(v1.VirtualMachineGroupVersionKind, ar.Request.Object.Raw); resp != nil {
return resp
}
raw := ar.Request.Object.Raw
vm := v1.VirtualMachine{}
err := json.Unmarshal(raw, &vm)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
// If the VirtualMachine is being deleted, return early to avoid racing any other in-flight resource deletions that might be happening
if vm.DeletionTimestamp != nil {
return &admissionv1.AdmissionResponse{
Allowed: true,
}
}
// We apply any referenced instancetype and preferences early here to the VirtualMachine in order to
// validate the resulting VirtualMachineInstanceSpec below. As we don't want to persist these changes
// we pass a copy of the original VirtualMachine here and to the validation call below.
vmCopy := vm.DeepCopy()
instancetypeSpec, preferenceSpec, causes := admitter.InstancetypeAdmitter.ApplyToVM(vmCopy)
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
// Set VirtualMachine defaults on the copy before validating
if err = defaults.SetDefaultVirtualMachineInstanceSpec(admitter.ClusterConfig, &vmCopy.Spec.Template.Spec); err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
// With the defaults now set we can check that the VM meets the requirements of any provided preference
if conflicts, err := admitter.InstancetypeAdmitter.Check(instancetypeSpec, preferenceSpec, &vmCopy.Spec.Template.Spec); err != nil {
return webhookutils.ToAdmissionResponse([]metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueNotFound,
Message: fmt.Sprintf("failure checking preference requirements: %v", err),
Field: conflicts.String(),
}})
}
if ar.Request.Operation == admissionv1.Create {
clusterCfg := admitter.ClusterConfig.GetConfig()
if devCfg := clusterCfg.DeveloperConfiguration; devCfg != nil {
if causes = featuregate.ValidateFeatureGates(devCfg.FeatureGates, &vm.Spec.Template.Spec); len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
}
netValidator := netadmitter.NewValidator(k8sfield.NewPath("spec"), &vmCopy.Spec.Template.Spec, admitter.ClusterConfig)
if causes = netValidator.ValidateCreation(); len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
}
_, isKubeVirtServiceAccount := admitter.KubeVirtServiceAccounts[ar.Request.UserInfo.Username]
causes = ValidateVirtualMachineSpec(k8sfield.NewPath("spec"), &vmCopy.Spec, admitter.ClusterConfig, isKubeVirtServiceAccount)
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
causes, err = storageadmitters.Admit(admitter.VirtClient, ctx, ar.Request, &vm, admitter.ClusterConfig)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
causes, err = admitter.validateVolumeRequests(ctx, &vm)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
} else if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
isDryRun := ar.Request.DryRun != nil && *ar.Request.DryRun
if !isDryRun && ar.Request.Operation == admissionv1.Create {
metrics.NewVMCreated(&vm)
}
warnings := warnDeprecatedAPIs(&vm.Spec.Template.Spec, admitter.ClusterConfig)
if vm.Spec.Running != nil {
warnings = append(warnings, "spec.running is deprecated, please use spec.runStrategy instead.")
}
return &admissionv1.AdmissionResponse{
Allowed: true,
Warnings: warnings,
}
}
func (admitter *VMsAdmitter) AdmitStatus(ctx context.Context, ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
vm, _, err := webhookutils.GetVMFromAdmissionReview(ar)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
causes, err := admitter.validateVolumeRequests(ctx, vm)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
} else if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
causes = storageadmitters.AdmitStatus(admitter.VirtClient, ctx, ar.Request, vm, admitter.ClusterConfig)
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
reviewResponse := admissionv1.AdmissionResponse{}
reviewResponse.Allowed = true
return &reviewResponse
}
func ValidateVirtualMachineSpec(field *k8sfield.Path, spec *v1.VirtualMachineSpec, config *virtconfig.ClusterConfig, isKubeVirtServiceAccount bool) []metav1.StatusCause {
var causes []metav1.StatusCause
if spec.Template == nil {
return append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: fmt.Sprintf("missing virtual machine template."),
Field: field.Child("template").String(),
})
}
causes = append(causes, ValidateVirtualMachineInstanceMetadata(field.Child("template", "metadata"), &spec.Template.ObjectMeta, config, isKubeVirtServiceAccount)...)
causes = append(causes, ValidateVirtualMachineInstanceSpec(field.Child("template", "spec"), &spec.Template.Spec, config)...)
causes = append(causes, storageadmitters.ValidateDataVolumeTemplate(field, spec)...)
causes = append(causes, validateRunStrategy(field, spec, config)...)
causes = append(causes, validateLiveUpdateFeatures(field, spec, config)...)
return causes
}
func validateRunStrategy(field *k8sfield.Path, spec *v1.VirtualMachineSpec, config *virtconfig.ClusterConfig) (causes []metav1.StatusCause) {
if spec.Running != nil && spec.RunStrategy != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Running and RunStrategy are mutually exclusive. Note that Running is deprecated, please use RunStrategy instead"),
Field: field.Child("running").String(),
})
}
if spec.Running == nil && spec.RunStrategy == nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("RunStrategy must be specified"),
Field: field.Child("running").String(),
})
}
if spec.RunStrategy != nil {
if *spec.RunStrategy == v1.RunStrategyWaitAsReceiver {
if config.DecentralizedLiveMigrationEnabled() {
// Only allow wait as receiver if feature gate is set.
return
}
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Invalid RunStrategy (%s), %s feature gate is not enabled in kubevirt resource", *spec.RunStrategy, featuregate.DecentralizedLiveMigration),
Field: field.Child("runStrategy").String(),
})
} else {
validRunStrategy := false
for _, strategy := range validRunStrategies {
if *spec.RunStrategy == strategy {
validRunStrategy = true
break
}
}
if !validRunStrategy {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Invalid RunStrategy (%s)", *spec.RunStrategy),
Field: field.Child("runStrategy").String(),
})
}
}
}
return causes
}
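// Minimal sketch of the membership test performed above (hypothetical helper):
// a run strategy is valid when it appears in validRunStrategies; the
// WaitAsReceiver strategy is handled separately because it is feature-gated.
func exampleIsPlainRunStrategy(rs v1.VirtualMachineRunStrategy) bool {
	for _, s := range validRunStrategies {
		if rs == s {
			return true
		}
	}
	return false
}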
func validateLiveUpdateFeatures(field *k8sfield.Path, spec *v1.VirtualMachineSpec, config *virtconfig.ClusterConfig) (causes []metav1.StatusCause) {
if !config.IsVMRolloutStrategyLiveUpdate() {
return causes
}
if spec.Template.Spec.Domain.CPU != nil {
causes = append(causes, validateLiveUpdateCPU(field, &spec.Template.Spec.Domain)...)
}
if spec.Template.Spec.Domain.Memory != nil && spec.Template.Spec.Domain.Memory.MaxGuest != nil {
if err := memory.ValidateLiveUpdateMemory(&spec.Template.Spec, spec.Template.Spec.Domain.Memory.MaxGuest); err != nil {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: err.Error(),
Field: field.Child("template", "spec", "domain", "memory", "guest").String(),
})
}
}
return causes
}
func validateLiveUpdateCPU(field *k8sfield.Path, domain *v1.DomainSpec) (causes []metav1.StatusCause) {
if domain.CPU.Sockets != 0 && domain.CPU.MaxSockets != 0 && domain.CPU.Sockets > domain.CPU.MaxSockets {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("Number of sockets in CPU topology is greater than the maximum sockets allowed"),
Field: field.Child("template", "spec", "domain", "cpu", "sockets").String(),
})
}
return causes
}
func (admitter *VMsAdmitter) validateVolumeRequests(ctx context.Context, vm *v1.VirtualMachine) ([]metav1.StatusCause, error) {
if len(vm.Status.VolumeRequests) == 0 {
return nil, nil
}
curVMAddRequestsMap := make(map[string]*v1.VirtualMachineVolumeRequest)
curVMRemoveRequestsMap := make(map[string]*v1.VirtualMachineVolumeRequest)
vmVolumeMap := make(map[string]v1.Volume)
vmiVolumeMap := make(map[string]v1.Volume)
vmi := &v1.VirtualMachineInstance{}
vmiExists := false
// get VMI if vm is active
if vm.Status.Ready {
var err error
vmi, err = admitter.VirtClient.VirtualMachineInstance(vm.Namespace).Get(ctx, vm.Name, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
return nil, err
} else if err == nil && vmi.DeletionTimestamp == nil {
// ignore validating the vmi if it is being deleted
vmiExists = true
}
}
if vmiExists {
for _, volume := range vmi.Spec.Volumes {
vmiVolumeMap[volume.Name] = volume
}
}
for _, volume := range vm.Spec.Template.Spec.Volumes {
vmVolumeMap[volume.Name] = volume
}
newSpec := vm.Spec.Template.Spec.DeepCopy()
for _, volumeRequest := range vm.Status.VolumeRequests {
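// Copy the loop variable so taking its address below is safe (pre-Go 1.22 loop semantics).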
volumeRequest := volumeRequest
name := ""
if volumeRequest.AddVolumeOptions != nil && volumeRequest.RemoveVolumeOptions != nil {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "VolumeRequests require either addVolumeOptions or removeVolumeOptions to be set, not both",
Field: k8sfield.NewPath("Status", "volumeRequests").String(),
}}, nil
} else if volumeRequest.AddVolumeOptions != nil {
name = volumeRequest.AddVolumeOptions.Name
_, ok := curVMAddRequestsMap[name]
if ok {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("AddVolume request for [%s] aleady exists", name),
Field: k8sfield.NewPath("Status", "volumeRequests").String(),
}}, nil
}
// Validate the disk is configured properly
invalidDiskStatusCause := storageadmitters.ValidateHotplugDiskConfiguration(
volumeRequest.AddVolumeOptions.Disk, name,
"AddVolume request",
k8sfield.NewPath("Status", "volumeRequests").String(),
)
if invalidDiskStatusCause != nil {
return invalidDiskStatusCause, nil
}
newVolume := v1.Volume{
Name: volumeRequest.AddVolumeOptions.Name,
}
if volumeRequest.AddVolumeOptions.VolumeSource.PersistentVolumeClaim != nil {
newVolume.VolumeSource.PersistentVolumeClaim = volumeRequest.AddVolumeOptions.VolumeSource.PersistentVolumeClaim
} else if volumeRequest.AddVolumeOptions.VolumeSource.DataVolume != nil {
newVolume.VolumeSource.DataVolume = volumeRequest.AddVolumeOptions.VolumeSource.DataVolume
}
vmVolume, ok := vmVolumeMap[name]
if ok && !equality.Semantic.DeepEqual(newVolume, vmVolume) {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("AddVolume request for [%s] conflicts with an existing volume of the same name on the vmi template.", name),
Field: k8sfield.NewPath("Status", "volumeRequests").String(),
}}, nil
}
vmiVolume, ok := vmiVolumeMap[name]
if ok && !equality.Semantic.DeepEqual(newVolume, vmiVolume) {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("AddVolume request for [%s] conflicts with an existing volume of the same name on currently running vmi", name),
Field: k8sfield.NewPath("Status", "volumeRequests").String(),
}}, nil
}
curVMAddRequestsMap[name] = &volumeRequest
} else if volumeRequest.RemoveVolumeOptions != nil {
name = volumeRequest.RemoveVolumeOptions.Name
_, ok := curVMRemoveRequestsMap[name]
if ok {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("RemoveVolume request for [%s] aleady exists", name),
Field: k8sfield.NewPath("Status", "volumeRequests").String(),
}}, nil
}
curVMRemoveRequestsMap[name] = &volumeRequest
} else {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "VolumeRequests require one of either addVolumeOptions or removeVolumeOptions to be set",
Field: k8sfield.NewPath("Status", "volumeRequests").String(),
}}, nil
}
newSpec = controller.ApplyVolumeRequestOnVMISpec(newSpec, &volumeRequest)
if vmiExists {
vmi.Spec = *controller.ApplyVolumeRequestOnVMISpec(&vmi.Spec, &volumeRequest)
}
}
// This simulates injecting the changes into the VMI template and validates that it will work.
causes := ValidateVirtualMachineInstanceSpec(k8sfield.NewPath("spec", "template", "spec"), newSpec, admitter.ClusterConfig)
if len(causes) > 0 {
return causes, nil
}
// This simulates injecting the changes directly into the vmi, if the vmi exists
if vmiExists {
causes := ValidateVirtualMachineInstanceSpec(k8sfield.NewPath("spec", "template", "spec"), &vmi.Spec, admitter.ClusterConfig)
if len(causes) > 0 {
return causes, nil
}
if migrationutil.IsMigrating(vmi) {
return []metav1.StatusCause{{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: fmt.Sprintf("Cannot handle volume requests while VMI migration is in progress"),
Field: k8sfield.NewPath("spec").String(),
}}, nil
}
}
return nil, nil
}
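// Illustration only: a volume request that sets both add and remove options is
// the first case validateVolumeRequests rejects. The literal below is a
// hypothetical example value, not data taken from a real VM.
func exampleConflictingVolumeRequest() v1.VirtualMachineVolumeRequest {
	return v1.VirtualMachineVolumeRequest{
		AddVolumeOptions:    &v1.AddVolumeOptions{Name: "data"},
		RemoveVolumeOptions: &v1.RemoveVolumeOptions{Name: "data"},
	}
}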
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package virtconfig
import (
"encoding/json"
"fmt"
"runtime"
"strings"
"sync"
k8sv1 "k8s.io/api/core/v1"
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/client-go/tools/cache"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/pointer"
)
const (
NodeDrainTaintDefaultKey = "kubevirt.io/drain"
)
type ConfigModifiedFn func()
// NewClusterConfig is a wrapper of NewClusterConfigWithCPUArch with default cpuArch.
func NewClusterConfig(crdInformer cache.SharedIndexInformer,
kubeVirtInformer cache.SharedIndexInformer,
namespace string) (*ClusterConfig, error) {
return NewClusterConfigWithCPUArch(
crdInformer,
kubeVirtInformer,
namespace,
runtime.GOARCH,
)
}
// NewClusterConfigWithCPUArch creates a ClusterConfig backed by the KubeVirt CR. It live-updates
// values when the config changes. The config update works like this:
// 1. Check if the config exists. If it does not exist, return the default config
// 2. Check if the config got updated. If so, try to parse and return it
// 3. In case of errors or no updates (resource version stays the same), return the values from the last good config
func NewClusterConfigWithCPUArch(crdInformer cache.SharedIndexInformer,
kubeVirtInformer cache.SharedInformer,
namespace, cpuArch string) (*ClusterConfig, error) {
defaultConfig := defaultClusterConfig(cpuArch)
c := &ClusterConfig{
crdStore: crdInformer.GetStore(),
kubeVirtStore: kubeVirtInformer.GetStore(),
cpuArch: cpuArch,
lock: &sync.Mutex{},
namespace: namespace,
lastValidConfig: defaultConfig,
defaultConfig: defaultConfig,
}
_, err := crdInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.crdAddedDeleted,
DeleteFunc: c.crdAddedDeleted,
UpdateFunc: c.crdUpdated,
})
if err != nil {
return nil, err
}
_, err = kubeVirtInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.configAddedDeleted,
UpdateFunc: c.configUpdated,
})
if err != nil {
return nil, err
}
return c, nil
}
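// Wiring sketch (assumes the ClusterConfig was built via one of the
// constructors above): register a callback that fires whenever the cluster
// configuration changes. Hypothetical helper, for illustration only.
func exampleRegisterConfigCallback(c *ClusterConfig) {
	c.SetConfigModifiedCallback(func() {
		log.DefaultLogger().Infof("cluster config changed, current arch: %s", c.cpuArch)
	})
}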
func (c *ClusterConfig) configAddedDeleted(_ interface{}) {
go c.GetConfig()
c.lock.Lock()
defer c.lock.Unlock()
if c.configModifiedCallback != nil {
for _, callback := range c.configModifiedCallback {
go callback()
}
}
}
func (c *ClusterConfig) configUpdated(_, _ interface{}) {
go c.GetConfig()
c.lock.Lock()
defer c.lock.Unlock()
if c.configModifiedCallback != nil {
for _, callback := range c.configModifiedCallback {
go callback()
}
}
}
func isDataVolumeCrd(crd *extv1.CustomResourceDefinition) bool {
return crd.Spec.Names.Kind == "DataVolume"
}
func isDataSourceCrd(crd *extv1.CustomResourceDefinition) bool {
return crd.Spec.Names.Kind == "DataSource"
}
func isServiceMonitor(crd *extv1.CustomResourceDefinition) bool {
return crd.Spec.Names.Kind == "ServiceMonitor"
}
func isPrometheusRules(crd *extv1.CustomResourceDefinition) bool {
return crd.Spec.Names.Kind == "PrometheusRule"
}
func (c *ClusterConfig) crdAddedDeleted(obj interface{}) {
go c.GetConfig()
crd := obj.(*extv1.CustomResourceDefinition)
if !isDataVolumeCrd(crd) && !isDataSourceCrd(crd) &&
!isServiceMonitor(crd) && !isPrometheusRules(crd) {
return
}
c.lock.Lock()
defer c.lock.Unlock()
if c.configModifiedCallback != nil {
for _, callback := range c.configModifiedCallback {
go callback()
}
}
}
func (c *ClusterConfig) crdUpdated(_, cur interface{}) {
c.crdAddedDeleted(cur)
}
func defaultClusterConfig(cpuArch string) *v1.KubeVirtConfiguration {
parallelOutboundMigrationsPerNodeDefault := ParallelOutboundMigrationsPerNodeDefault
parallelMigrationsPerClusterDefault := ParallelMigrationsPerClusterDefault
bandwidthPerMigrationDefault := resource.MustParse(BandwidthPerMigrationDefault)
nodeDrainTaintDefaultKey := NodeDrainTaintDefaultKey
allowAutoConverge := MigrationAllowAutoConverge
allowPostCopy := MigrationAllowPostCopy
defaultUnsafeMigrationOverride := DefaultUnsafeMigrationOverride
progressTimeout := MigrationProgressTimeout
completionTimeoutPerGiB := MigrationCompletionTimeoutPerGiB
utilityVolumesTimeout := MigrationUtilityVolumesTimeoutSeconds
cpuRequestDefault := resource.MustParse(DefaultCPURequest)
nodeSelectorsDefault, _ := parseNodeSelectors(DefaultNodeSelectors)
defaultNetworkInterface := DefaultNetworkInterface
defaultMemBalloonStatsPeriod := DefaultMemBalloonStatsPeriod
SmbiosDefaultConfig := &v1.SMBiosConfiguration{
Family: SmbiosConfigDefaultFamily,
Manufacturer: SmbiosConfigDefaultManufacturer,
Product: SmbiosConfigDefaultProduct,
}
supportedQEMUGuestAgentVersions := strings.Split(strings.TrimRight(SupportedGuestAgentVersions, ","), ",")
defaultDiskVerification := &v1.DiskVerification{
MemoryLimit: resource.NewQuantity(DefaultDiskVerificationMemoryLimitBytes, resource.BinarySI),
}
defaultEvictionStrategy := v1.EvictionStrategyNone
return &v1.KubeVirtConfiguration{
ImagePullPolicy: DefaultImagePullPolicy,
DeveloperConfiguration: &v1.DeveloperConfiguration{
UseEmulation: DefaultAllowEmulation,
MemoryOvercommit: DefaultMemoryOvercommit,
LessPVCSpaceToleration: DefaultLessPVCSpaceToleration,
MinimumReservePVCBytes: DefaultMinimumReservePVCBytes,
NodeSelectors: nodeSelectorsDefault,
CPUAllocationRatio: DefaultCPUAllocationRatio,
DiskVerification: defaultDiskVerification,
LogVerbosity: &v1.LogVerbosity{
VirtAPI: DefaultVirtAPILogVerbosity,
VirtOperator: DefaultVirtOperatorLogVerbosity,
VirtController: DefaultVirtControllerLogVerbosity,
VirtHandler: DefaultVirtHandlerLogVerbosity,
VirtLauncher: DefaultVirtLauncherLogVerbosity,
},
},
EvictionStrategy: &defaultEvictionStrategy,
MigrationConfiguration: &v1.MigrationConfiguration{
ParallelMigrationsPerCluster: ¶llelMigrationsPerClusterDefault,
ParallelOutboundMigrationsPerNode: ¶llelOutboundMigrationsPerNodeDefault,
NodeDrainTaintKey: &nodeDrainTaintDefaultKey,
BandwidthPerMigration: &bandwidthPerMigrationDefault,
ProgressTimeout: &progressTimeout,
CompletionTimeoutPerGiB: &completionTimeoutPerGiB,
UtilityVolumesTimeout: &utilityVolumesTimeout,
UnsafeMigrationOverride: &defaultUnsafeMigrationOverride,
AllowAutoConverge: &allowAutoConverge,
AllowPostCopy: &allowPostCopy,
},
CPURequest: &cpuRequestDefault,
NetworkConfiguration: &v1.NetworkConfiguration{
NetworkInterface: defaultNetworkInterface,
DeprecatedPermitSlirpInterface: pointer.P(DefaultPermitSlirpInterface),
PermitBridgeInterfaceOnPodNetwork: pointer.P(DefaultPermitBridgeInterfaceOnPodNetwork),
},
SMBIOSConfig: SmbiosDefaultConfig,
SELinuxLauncherType: DefaultSELinuxLauncherType,
SupportedGuestAgentVersions: supportedQEMUGuestAgentVersions,
MemBalloonStatsPeriod: &defaultMemBalloonStatsPeriod,
APIConfiguration: &v1.ReloadableComponentConfiguration{
RestClient: &v1.RESTClientConfiguration{RateLimiter: &v1.RateLimiter{TokenBucketRateLimiter: &v1.TokenBucketRateLimiter{
QPS: DefaultVirtAPIQPS,
Burst: DefaultVirtAPIBurst,
}}},
},
ControllerConfiguration: &v1.ReloadableComponentConfiguration{
RestClient: &v1.RESTClientConfiguration{RateLimiter: &v1.RateLimiter{TokenBucketRateLimiter: &v1.TokenBucketRateLimiter{
QPS: DefaultVirtControllerQPS,
Burst: DefaultVirtControllerBurst,
}}},
},
HandlerConfiguration: &v1.ReloadableComponentConfiguration{
RestClient: &v1.RESTClientConfiguration{RateLimiter: &v1.RateLimiter{TokenBucketRateLimiter: &v1.TokenBucketRateLimiter{
QPS: DefaultVirtHandlerQPS,
Burst: DefaultVirtHandlerBurst,
}}},
},
WebhookConfiguration: &v1.ReloadableComponentConfiguration{
RestClient: &v1.RESTClientConfiguration{RateLimiter: &v1.RateLimiter{TokenBucketRateLimiter: &v1.TokenBucketRateLimiter{
QPS: DefaultVirtWebhookClientQPS,
Burst: DefaultVirtWebhookClientBurst,
}}},
},
ArchitectureConfiguration: &v1.ArchConfiguration{
Amd64: &v1.ArchSpecificConfiguration{
OVMFPath: DefaultARCHOVMFPath,
EmulatedMachines: strings.Split(DefaultAMD64EmulatedMachines, ","),
MachineType: DefaultAMD64MachineType,
},
Arm64: &v1.ArchSpecificConfiguration{
OVMFPath: DefaultAARCH64OVMFPath,
EmulatedMachines: strings.Split(DefaultAARCH64EmulatedMachines, ","),
MachineType: DefaultAARCH64MachineType,
},
S390x: &v1.ArchSpecificConfiguration{
OVMFPath: DefaultS390xOVMFPath,
EmulatedMachines: strings.Split(DefaultS390XEmulatedMachines, ","),
MachineType: DefaultS390XMachineType,
},
DefaultArchitecture: runtime.GOARCH,
},
LiveUpdateConfiguration: &v1.LiveUpdateConfiguration{
MaxHotplugRatio: DefaultMaxHotplugRatio,
},
VMRolloutStrategy: pointer.P(DefaultVMRolloutStrategy),
}
}
type ClusterConfig struct {
crdStore cache.Store
kubeVirtStore cache.Store
namespace string
cpuArch string
lock *sync.Mutex
lastValidConfig *v1.KubeVirtConfiguration
defaultConfig *v1.KubeVirtConfiguration
lastInvalidConfigResourceVersion string
lastValidConfigResourceVersion string
configModifiedCallback []ConfigModifiedFn
}
func (c *ClusterConfig) SetConfigModifiedCallback(cb ConfigModifiedFn) {
c.lock.Lock()
defer c.lock.Unlock()
c.configModifiedCallback = append(c.configModifiedCallback, cb)
go cb()
}
func setConfigFromKubeVirt(config *v1.KubeVirtConfiguration, kv *v1.KubeVirt) error {
kvConfig := &kv.Spec.Configuration
overrides, err := json.Marshal(kvConfig)
if err != nil {
return err
}
err = json.Unmarshal(overrides, &config)
if err != nil {
return err
}
if config.ArchitectureConfiguration == nil {
config.ArchitectureConfiguration = &v1.ArchConfiguration{}
}
// set default architecture from status of CR
config.ArchitectureConfiguration.DefaultArchitecture = kv.Status.DefaultArchitecture
return validateConfig(config)
}
// getConfig returns the latest valid parsed config map result, or updates it
// if a newer version is available.
// XXX Rework this, to happen mostly in informer callbacks.
// This will also allow us then to react to config changes and e.g. restart some controllers
func (c *ClusterConfig) GetConfig() (config *v1.KubeVirtConfiguration) {
c.lock.Lock()
defer c.lock.Unlock()
kv := c.GetConfigFromKubeVirtCR()
if kv == nil {
return c.lastValidConfig
}
resourceVersion := kv.ResourceVersion
// reuse the cached result if this resource version was already processed,
// whether it parsed successfully or not
if c.lastValidConfigResourceVersion == resourceVersion ||
c.lastInvalidConfigResourceVersion == resourceVersion {
return c.lastValidConfig
}
config = defaultClusterConfig(c.cpuArch)
err := setConfigFromKubeVirt(config, kv)
if err != nil {
c.lastInvalidConfigResourceVersion = resourceVersion
log.DefaultLogger().Reason(err).Errorf("Invalid cluster config using KubeVirt resource version '%s', falling back to last good resource version '%s'", resourceVersion, c.lastValidConfigResourceVersion)
return c.lastValidConfig
}
log.DefaultLogger().Infof("Updating cluster config from KubeVirt to resource version '%s'", resourceVersion)
c.lastValidConfigResourceVersion = resourceVersion
c.lastValidConfig = config
return c.lastValidConfig
}
func (c *ClusterConfig) GetConfigFromKubeVirtCR() *v1.KubeVirt {
objects := c.kubeVirtStore.List()
var kubeVirtName string
for _, obj := range objects {
if kv, ok := obj.(*v1.KubeVirt); ok && kv.DeletionTimestamp == nil {
if kv.Status.Phase != "" {
kubeVirtName = kv.Name
}
}
}
if kubeVirtName == "" {
return nil
}
if obj, exists, err := c.kubeVirtStore.GetByKey(c.namespace + "/" + kubeVirtName); err != nil {
log.DefaultLogger().Reason(err).Errorf("Error loading the cluster config from KubeVirt cache, falling back to last good resource version '%s'", c.lastValidConfigResourceVersion)
return nil
} else if !exists {
// this path should not be possible
return nil
} else {
return obj.(*v1.KubeVirt)
}
}
func (c *ClusterConfig) HasDataSourceAPI() bool {
c.lock.Lock()
defer c.lock.Unlock()
objects := c.crdStore.List()
for _, obj := range objects {
if crd, ok := obj.(*extv1.CustomResourceDefinition); ok && crd.DeletionTimestamp == nil {
if isDataSourceCrd(crd) {
return true
}
}
}
return false
}
func (c *ClusterConfig) HasDataVolumeAPI() bool {
c.lock.Lock()
defer c.lock.Unlock()
objects := c.crdStore.List()
for _, obj := range objects {
if crd, ok := obj.(*extv1.CustomResourceDefinition); ok && crd.DeletionTimestamp == nil {
if isDataVolumeCrd(crd) {
return true
}
}
}
return false
}
func (c *ClusterConfig) HasServiceMonitorAPI() bool {
c.lock.Lock()
defer c.lock.Unlock()
objects := c.crdStore.List()
for _, obj := range objects {
if crd, ok := obj.(*extv1.CustomResourceDefinition); ok && crd.DeletionTimestamp == nil {
if isServiceMonitor(crd) {
return true
}
}
}
return false
}
func (c *ClusterConfig) HasPrometheusRuleAPI() bool {
c.lock.Lock()
defer c.lock.Unlock()
objects := c.crdStore.List()
for _, obj := range objects {
if crd, ok := obj.(*extv1.CustomResourceDefinition); ok && crd.DeletionTimestamp == nil {
if isPrometheusRules(crd) {
return true
}
}
}
return false
}
func parseNodeSelectors(str string) (map[string]string, error) {
nodeSelectors := make(map[string]string)
for _, s := range strings.Split(strings.TrimSpace(str), "\n") {
v := strings.Split(s, "=")
if len(v) != 2 {
return nil, fmt.Errorf("Invalid node selector: %s", s)
}
nodeSelectors[v[0]] = v[1]
}
return nodeSelectors, nil
}
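// Example input (illustrative): parseNodeSelectors expects newline-separated
// key=value pairs, as in the DefaultNodeSelectors constant used above.
func exampleParseNodeSelectors() (map[string]string, error) {
	return parseNodeSelectors("kubernetes.io/os=linux\nkubernetes.io/arch=amd64")
}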
func validateConfig(config *v1.KubeVirtConfiguration) error {
// validate the image pull policy
switch config.ImagePullPolicy {
case "", k8sv1.PullAlways, k8sv1.PullNever, k8sv1.PullIfNotPresent:
break
default:
return fmt.Errorf("invalid dev.imagePullPolicy in config: %v", config.ImagePullPolicy)
}
if config.DeveloperConfiguration.MemoryOvercommit <= 0 {
return fmt.Errorf("invalid memoryOvercommit in ConfigMap: %d", config.DeveloperConfiguration.MemoryOvercommit)
}
if config.DeveloperConfiguration.CPUAllocationRatio <= 0 {
return fmt.Errorf("invalid cpu allocation ratio in ConfigMap: %d", config.DeveloperConfiguration.CPUAllocationRatio)
}
if toleration := config.DeveloperConfiguration.LessPVCSpaceToleration; toleration < 0 || toleration > 100 {
return fmt.Errorf("invalid lessPVCSpaceToleration in ConfigMap: %d", toleration)
}
// validate the default network interface
switch config.NetworkConfiguration.NetworkInterface {
case "", string(v1.BridgeInterface), string(v1.DeprecatedSlirpInterface), string(v1.MasqueradeInterface):
break
default:
return fmt.Errorf("invalid default-network-interface in config: %v", config.NetworkConfiguration.NetworkInterface)
}
return nil
}
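// Negative-case sketch (illustration only, hypothetical helper): starting from
// the defaults and zeroing MemoryOvercommit makes validateConfig return an error.
func exampleInvalidMemoryOvercommit() error {
	cfg := defaultClusterConfig(runtime.GOARCH)
	cfg.DeveloperConfiguration.MemoryOvercommit = 0
	return validateConfig(cfg)
}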
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package virtconfig
import "kubevirt.io/kubevirt/pkg/virt-config/featuregate"
/*
This module determines whether an optional feature is enabled at the cluster level.
*/
func (config *ClusterConfig) isFeatureGateDefined(featureGate string) bool {
for _, fg := range config.GetConfig().DeveloperConfiguration.FeatureGates {
if fg == featureGate {
return true
}
}
return false
}
func (config *ClusterConfig) isFeatureGateEnabled(featureGate string) bool {
if fg := featuregate.FeatureGateInfo(featureGate); fg != nil && fg.State == featuregate.GA {
return true
}
if config.isFeatureGateDefined(featureGate) {
return true
}
return false
}
func (config *ClusterConfig) ExpandDisksEnabled() bool {
return config.isFeatureGateEnabled(featuregate.ExpandDisksGate)
}
func (config *ClusterConfig) CPUManagerEnabled() bool {
return config.isFeatureGateEnabled(featuregate.CPUManager)
}
func (config *ClusterConfig) NUMAEnabled() bool {
return config.isFeatureGateEnabled(featuregate.NUMAFeatureGate)
}
func (config *ClusterConfig) DownwardMetricsEnabled() bool {
return config.isFeatureGateEnabled(featuregate.DownwardMetricsFeatureGate)
}
func (config *ClusterConfig) IgnitionEnabled() bool {
return config.isFeatureGateEnabled(featuregate.IgnitionGate)
}
func (config *ClusterConfig) LiveMigrationEnabled() bool {
return config.isFeatureGateEnabled(featuregate.LiveMigrationGate)
}
func (config *ClusterConfig) UtilityVolumesEnabled() bool {
return config.isFeatureGateEnabled(featuregate.UtilityVolumesGate)
}
func (config *ClusterConfig) SRIOVLiveMigrationEnabled() bool {
return config.isFeatureGateEnabled(featuregate.SRIOVLiveMigrationGate)
}
func (config *ClusterConfig) HypervStrictCheckEnabled() bool {
return config.isFeatureGateEnabled(featuregate.HypervStrictCheckGate)
}
func (config *ClusterConfig) CPUNodeDiscoveryEnabled() bool {
return config.isFeatureGateEnabled(featuregate.CPUNodeDiscoveryGate)
}
func (config *ClusterConfig) SidecarEnabled() bool {
return config.isFeatureGateEnabled(featuregate.SidecarGate)
}
func (config *ClusterConfig) GPUPassthroughEnabled() bool {
return config.isFeatureGateEnabled(featuregate.GPUGate)
}
func (config *ClusterConfig) SnapshotEnabled() bool {
return config.isFeatureGateEnabled(featuregate.SnapshotGate)
}
func (config *ClusterConfig) VMExportEnabled() bool {
return config.isFeatureGateEnabled(featuregate.VMExportGate)
}
func (config *ClusterConfig) HotplugVolumesEnabled() bool {
return config.isFeatureGateEnabled(featuregate.HotplugVolumesGate)
}
func (config *ClusterConfig) HostDiskEnabled() bool {
return config.isFeatureGateEnabled(featuregate.HostDiskGate)
}
func (config *ClusterConfig) VirtiofsStorageEnabled() bool {
return config.isFeatureGateEnabled(featuregate.VirtIOFSStorageVolumeGate)
}
func (config *ClusterConfig) MacvtapEnabled() bool {
return config.isFeatureGateEnabled(featuregate.MacvtapGate)
}
func (config *ClusterConfig) PasstEnabled() bool {
return config.isFeatureGateEnabled(featuregate.PasstGate)
}
func (config *ClusterConfig) HostDevicesPassthroughEnabled() bool {
return config.isFeatureGateEnabled(featuregate.HostDevicesGate)
}
func (config *ClusterConfig) RootEnabled() bool {
return config.isFeatureGateEnabled(featuregate.Root)
}
func (config *ClusterConfig) WorkloadEncryptionSEVEnabled() bool {
return config.isFeatureGateEnabled(featuregate.WorkloadEncryptionSEV)
}
func (config *ClusterConfig) WorkloadEncryptionTDXEnabled() bool {
return config.isFeatureGateEnabled(featuregate.WorkloadEncryptionTDX)
}
func (config *ClusterConfig) DockerSELinuxMCSWorkaroundEnabled() bool {
return config.isFeatureGateEnabled(featuregate.DockerSELinuxMCSWorkaround)
}
func (config *ClusterConfig) VSOCKEnabled() bool {
return config.isFeatureGateEnabled(featuregate.VSOCKGate)
}
func (config *ClusterConfig) MediatedDevicesHandlingDisabled() bool {
return config.isFeatureGateEnabled(featuregate.DisableMediatedDevicesHandling)
}
func (config *ClusterConfig) KubevirtSeccompProfileEnabled() bool {
return config.isFeatureGateEnabled(featuregate.KubevirtSeccompProfile)
}
func (config *ClusterConfig) HotplugNetworkInterfacesEnabled() bool {
return config.isFeatureGateEnabled(featuregate.HotplugNetworkIfacesGate)
}
func (config *ClusterConfig) PersistentReservationEnabled() bool {
return config.isFeatureGateEnabled(featuregate.PersistentReservation)
}
func (config *ClusterConfig) MultiArchitectureEnabled() bool {
return config.isFeatureGateEnabled(featuregate.MultiArchitecture)
}
func (config *ClusterConfig) AlignCPUsEnabled() bool {
return config.isFeatureGateEnabled(featuregate.AlignCPUsGate)
}
func (config *ClusterConfig) ImageVolumeEnabled() bool {
return config.isFeatureGateEnabled(featuregate.ImageVolume)
}
func (config *ClusterConfig) VideoConfigEnabled() bool {
return config.isFeatureGateEnabled(featuregate.VideoConfig)
}
func (config *ClusterConfig) NodeRestrictionEnabled() bool {
return config.isFeatureGateEnabled(featuregate.NodeRestrictionGate)
}
func (config *ClusterConfig) ObjectGraphEnabled() bool {
return config.isFeatureGateEnabled(featuregate.ObjectGraph)
}
func (config *ClusterConfig) DeclarativeHotplugVolumesEnabled() bool {
return config.isFeatureGateEnabled(featuregate.DeclarativeHotplugVolumesGate)
}
func (config *ClusterConfig) SecureExecutionEnabled() bool {
return config.isFeatureGateEnabled(featuregate.SecureExecution)
}
func (config *ClusterConfig) PanicDevicesEnabled() bool {
return config.isFeatureGateEnabled(featuregate.PanicDevicesGate)
}
func (config *ClusterConfig) PasstIPStackMigrationEnabled() bool {
return config.isFeatureGateEnabled(featuregate.PasstIPStackMigration)
}
func (config *ClusterConfig) DecentralizedLiveMigrationEnabled() bool {
return config.isFeatureGateEnabled(featuregate.DecentralizedLiveMigration)
}
func (config *ClusterConfig) GPUsWithDRAGateEnabled() bool {
return config.isFeatureGateEnabled(featuregate.GPUsWithDRAGate)
}
func (config *ClusterConfig) HostDevicesWithDRAEnabled() bool {
return config.isFeatureGateEnabled(featuregate.HostDevicesWithDRAGate)
}
func (config *ClusterConfig) IncrementalBackupEnabled() bool {
return config.isFeatureGateEnabled(featuregate.IncrementalBackupGate)
}
func (config *ClusterConfig) MigrationPriorityQueueEnabled() bool {
return config.isFeatureGateEnabled(featuregate.MigrationPriorityQueue)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package featuregate
const (
ExpandDisksGate = "ExpandDisks"
CPUManager = "CPUManager"
IgnitionGate = "ExperimentalIgnitionSupport"
HypervStrictCheckGate = "HypervStrictCheck"
SidecarGate = "Sidecar"
HostDevicesGate = "HostDevices"
// Owner: sig-storage
// Alpha: v0.30.0
// Beta: v1.3.0
SnapshotGate = "Snapshot"
// Owner: sig-storage
// Alpha: v0.55.0
// Beta: v1.3.0
VMExportGate = "VMExport"
HotplugVolumesGate = "HotplugVolumes"
HostDiskGate = "HostDisk"
// Owner: sig-storage
// Alpha: v1.7.0
//
// UtilityVolumes enables the utility volumes feature, which provides a general capability
// of hot-plugging volumes directly into the virt-launcher Pod for operational workflows.
UtilityVolumesGate = "UtilityVolumes"
DownwardMetricsFeatureGate = "DownwardMetrics"
Root = "Root"
WorkloadEncryptionSEV = "WorkloadEncryptionSEV"
WorkloadEncryptionTDX = "WorkloadEncryptionTDX"
VSOCKGate = "VSOCK"
// KubevirtSeccompProfile indicates that KubeVirt will install its custom seccomp profile and
// that users can tell KubeVirt to use it.
KubevirtSeccompProfile = "KubevirtSeccompProfile"
// DisableMediatedDevicesHandling disables the handling of mediated
// devices, including their creation and deletion.
DisableMediatedDevicesHandling = "DisableMDEVConfiguration"
// PersistentReservation enables the use of the SCSI persistent reservation with the pr-helper daemon
PersistentReservation = "PersistentReservation"
// AlignCPUsGate allows the emulator thread to be assigned up to two extra CPUs if needed to complete even parity.
AlignCPUsGate = "AlignCPUs"
// Owner: @xpivarc
// Alpha: v1.3.0
// Beta: v1.6.0
//
// NodeRestriction enables a Kubelet-like NodeRestriction, but for KubeVirt's virt-handler.
// This feature requires the Kubernetes feature gate "ServiceAccountTokenPodNodeInfo", which is available
// in Kubernetes 1.30 as Beta and graduated in 1.32.
NodeRestrictionGate = "NodeRestriction"
// Owner: @Barakmor1
// Alpha: v1.6.0
// Beta: v1.7.0
//
// ImageVolume uses the Kubernetes ImageVolume feature gate to eliminate
// the need for an extra container for containerDisk, improving security by avoiding
// bind mounts in virt-handler.
ImageVolume = "ImageVolume"
// Owner: @shellyka13
// Alpha: v1.6.0
//
// IncrementalBackup feature gate enables creating full and incremental backups for virtual machines.
// These backups leverage libvirt's native backup capabilities, providing a storage-agnostic solution.
// To support incremental backups, a QCOW2 overlay must be created on top of the VM's raw disk image.
IncrementalBackupGate = "IncrementalBackup"
VirtIOFSStorageVolumeGate = "EnableVirtioFsStorageVolumes"
// Owner: @alaypatel07
// Alpha: v1.6.0
//
// GPUsWithDRAGate allows users to create VMIs with DRA provisioned GPU devices
GPUsWithDRAGate = "GPUsWithDRA"
// Owner: @alaypatel07
// Alpha: v1.6.0
//
// HostDevicesWithDRAGate allows users to create VMIs with DRA provisioned Host devices
HostDevicesWithDRAGate = "HostDevicesWithDRA"
DecentralizedLiveMigration = "DecentralizedLiveMigration"
// Owner: sig-storage / @alromeros
// Alpha: v1.6.0
//
// ObjectGraph introduces a new subresource for VMs and VMIs.
// This subresource returns a structured list of k8s objects that are related
// to the specified VM or VMI, enabling better dependency tracking.
ObjectGraph = "ObjectGraph"
	// DeclarativeHotplugVolumesGate enables adding/removing volumes declaratively;
	// it also implicitly handles CDROM inject/eject
DeclarativeHotplugVolumesGate = "DeclarativeHotplugVolumes"
	// Owner: sig-compute / @jschintag
// Alpha: v1.6.0
// Beta: v1.7.0
//
// SecureExecution introduces secure execution of VMs on IBM Z architecture
SecureExecution = "SecureExecution"
	// VideoConfig enables VM owners to specify a video device type (e.g., virtio, vga, bochs, ramfb) via the `Video` field, overriding default settings.
	// Requires `autoattachGraphicsDevice` to be true or unset. Defaults remain unchanged.
// Owner: @dasionov
// Alpha: v1.6.0
// Beta: v1.7.0
//
VideoConfig = "VideoConfig"
// Owner: @varunrsekar
// Alpha: v1.6.0
// Beta: v1.7.0
//
// PanicDevices allows defining panic devices for signaling crashes in the guest for a VirtualMachineInstance.
PanicDevicesGate = "PanicDevices"
// Alpha: v1.6.0
//
// PasstIPStackMigration enables seamless migration with passt network binding.
PasstIPStackMigration = "PasstIPStackMigration"
// MigrationPriorityQueue enables controllers to assign priorities to migrations,
// ensuring system-initiated migrations (e.g., node drains, upgrades) take precedence
// over user-initiated ones (e.g., hot plug operations).
// Owner: sig-compute / @fossedihelm
// Alpha: v1.7.0
//
MigrationPriorityQueue = "MigrationPriorityQueue"
)
func init() {
RegisterFeatureGate(FeatureGate{Name: ImageVolume, State: Beta})
RegisterFeatureGate(FeatureGate{Name: ExpandDisksGate, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: CPUManager, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: IgnitionGate, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: HypervStrictCheckGate, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: SidecarGate, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: HostDevicesGate, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: SnapshotGate, State: Beta})
RegisterFeatureGate(FeatureGate{Name: VMExportGate, State: Beta})
RegisterFeatureGate(FeatureGate{Name: HotplugVolumesGate, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: HostDiskGate, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: DownwardMetricsFeatureGate, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: Root, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: WorkloadEncryptionSEV, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: WorkloadEncryptionTDX, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: VSOCKGate, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: KubevirtSeccompProfile, State: Beta})
RegisterFeatureGate(FeatureGate{Name: DisableMediatedDevicesHandling, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: PersistentReservation, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: AlignCPUsGate, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: NodeRestrictionGate, State: Beta})
RegisterFeatureGate(FeatureGate{Name: VirtIOFSStorageVolumeGate, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: GPUsWithDRAGate, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: HostDevicesWithDRAGate, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: DecentralizedLiveMigration, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: DeclarativeHotplugVolumesGate, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: SecureExecution, State: Beta})
RegisterFeatureGate(FeatureGate{Name: VideoConfig, State: Beta})
RegisterFeatureGate(FeatureGate{Name: PanicDevicesGate, State: Beta})
RegisterFeatureGate(FeatureGate{Name: UtilityVolumesGate, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: PasstIPStackMigration, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: IncrementalBackupGate, State: Alpha})
RegisterFeatureGate(FeatureGate{Name: MigrationPriorityQueue, State: Alpha})
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package featuregate
import (
"fmt"
v1 "kubevirt.io/api/core/v1"
)
type State string
const (
// Alpha represents features that are under experimentation.
// The feature is disabled by default and can be enabled explicitly through the FG.
Alpha State = "Alpha"
// Beta represents features that are under evaluation.
// The feature is disabled by default and can be enabled explicitly through the FG.
Beta State = "Beta"
// GA represents features that reached General Availability.
// GA features are considered feature-gate enabled, with no option to disable them by an FG.
GA State = "General Availability"
// Deprecated represents features that are going to be discontinued in the following release.
	// Warn users about the imminent removal of the feature & FG.
// The feature is disabled by default and can be enabled explicitly through the FG.
Deprecated State = "Deprecated"
// Discontinued represents features that have been removed, with no option to enable them.
Discontinued State = "Discontinued"
WarningPattern = "feature gate %s is deprecated (feature state is %q), therefore it can be safely removed and is redundant. " +
"For more info, please look at: https://github.com/kubevirt/kubevirt/blob/main/docs/deprecation.md"
)
type FeatureGate struct {
Name string
State State
VmiSpecUsed func(spec *v1.VirtualMachineInstanceSpec) bool
Message string
}
var featureGates = map[string]FeatureGate{}
// RegisterFeatureGate adds a given feature-gate to the FG list
// In case the FG already exists (based on its name), it overrides the
// existing FG.
// If a feature gate that is neither Alpha nor Beta is missing a message, a default one is set.
func RegisterFeatureGate(fg FeatureGate) {
if fg.State != Alpha && fg.State != Beta && fg.Message == "" {
fg.Message = fmt.Sprintf(WarningPattern, fg.Name, fg.State)
}
featureGates[fg.Name] = fg
}
func UnregisterFeatureGate(fgName string) {
delete(featureGates, fgName)
}
func FeatureGateInfo(featureGate string) *FeatureGate {
if fg, exist := featureGates[featureGate]; exist {
return &fg
}
return nil
}
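// Illustrative sketch, not part of the original source: registering a
// Deprecated gate without a Message picks up the default WarningPattern,
// while Alpha/Beta gates never receive one. The gate name "LegacyWidget"
// is hypothetical.
func exampleRegisterDeprecatedGate() {
	RegisterFeatureGate(FeatureGate{Name: "LegacyWidget", State: Deprecated})
	if fg := FeatureGateInfo("LegacyWidget"); fg != nil {
		fmt.Println(fg.Message) // the WarningPattern rendered for "LegacyWidget"
	}
	UnregisterFeatureGate("LegacyWidget")
}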
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package featuregate
import "fmt"
const (
LiveMigrationGate = "LiveMigration"
SRIOVLiveMigrationGate = "SRIOVLiveMigration"
NonRoot = "NonRoot"
PSA = "PSA"
CPUNodeDiscoveryGate = "CPUNodeDiscovery"
NUMAFeatureGate = "NUMA"
GPUGate = "GPU"
// VMLiveUpdateFeaturesGate allows updating certain VM fields, such as CPU sockets to enable hot-plug functionality.
// GA: v1.5.0
VMLiveUpdateFeaturesGate = "VMLiveUpdateFeatures"
// CommonInstancetypesDeploymentGate enables the deployment of common-instancetypes by virt-operator
// Owner: @lyarwood
// Alpha: v1.1.0
// Beta: v1.2.0
// GA: v1.4.0
CommonInstancetypesDeploymentGate = "CommonInstancetypesDeploymentGate"
// HotplugNetworkIfacesGate controls the network interface hotplug feature lifecycle.
// Alpha: v1.1.0
// Beta: v1.3.0
// GA: v1.4.0
HotplugNetworkIfacesGate = "HotplugNICs"
// BochsDisplayForEFIGuests instructs EFI guests to start with Bochs display (instead of VGA)
// GA: v1.4.0
BochsDisplayForEFIGuests = "BochsDisplayForEFIGuests"
// AutoResourceLimitsGate enables automatic setting of vmi limits if there is a ResourceQuota with limits associated with the vmi namespace.
// GA: v1.5.0
AutoResourceLimitsGate = "AutoResourceLimitsGate"
// DockerSELinuxMCSWorkaround sets the SELinux level of all the non-compute virt-launcher containers to "s0".
// Deprecated: v1.4.0
DockerSELinuxMCSWorkaround = "DockerSELinuxMCSWorkaround"
// NetworkBindingPlugingsGate enables using a plugin to bind the pod and the VM network
// Alpha: v1.1.0
// Beta: v1.4.0
// GA: v1.5.0
NetworkBindingPlugingsGate = "NetworkBindingPlugins"
// DynamicPodInterfaceNamingGate enables a mechanism to dynamically determine the primary pod interface for KubeVirt virtual machines.
// Beta: v1.4.0
// GA: v1.5.0
DynamicPodInterfaceNamingGate = "DynamicPodInterfaceNaming"
PasstGate = "Passt"
MacvtapGate = "Macvtap"
// VirtIOFSGate enables the use of virtiofs for config and storage volumes.
// Discontinued in v1.7.0
VirtIOFSGate = "ExperimentalVirtiofsSupport"
	// VolumesUpdateStrategy enables specifying the update strategy for volumes.
// Introduced in v1.3.0
VolumesUpdateStrategy = "VolumesUpdateStrategy"
	// VolumeMigration enables migrating storage. It depends on the VolumesUpdateStrategy feature.
// Introduced in v1.3.0
VolumeMigration = "VolumeMigration"
// DisableCustomSELinuxPolicy disables the installation of the custom SELinux policy for virt-launcher
DisableCustomSELinuxPolicy = "DisableCustomSELinuxPolicy"
ClusterProfiler = "ClusterProfiler"
// VMPersistentState enables persisting backend state files of VMs, such as the contents of the vTPM
VMPersistentState = "VMPersistentState"
// Owner: @lyarwood
// Alpha: v1.4.0
// Beta: v1.5.0
// GA: v1.6.0
//
// InstancetypeReferencePolicy allows a cluster admin to control how a VirtualMachine references instance types and preferences
// through the kv.spec.configuration.instancetype.referencePolicy configurable.
InstancetypeReferencePolicy = "InstancetypeReferencePolicy"
// Owner: sig-compute / @lyarwood
// Alpha: v1.0.0
// Deprecated: v1.8.0
//
	// MultiArchitecture allows VM/VMIs to request and schedule to an architecture other than that of the control plane
MultiArchitecture = "MultiArchitecture"
// VirtIOFSConfigVolumesGate enables the use of virtiofs for config volumes, i.e., config-maps, secrets, downwardAPI, etc.
	// Owners: @germag @jcanocan
// Alpha: v1.5.0
// Beta: v1.6.0
// GA: v1.8.0
VirtIOFSConfigVolumesGate = "EnableVirtioFsConfigVolumes"
)
func init() {
RegisterFeatureGate(FeatureGate{Name: LiveMigrationGate, State: GA})
RegisterFeatureGate(FeatureGate{Name: SRIOVLiveMigrationGate, State: GA})
RegisterFeatureGate(FeatureGate{Name: NonRoot, State: GA})
RegisterFeatureGate(FeatureGate{Name: PSA, State: GA})
RegisterFeatureGate(FeatureGate{Name: CPUNodeDiscoveryGate, State: GA})
RegisterFeatureGate(FeatureGate{Name: NUMAFeatureGate, State: GA})
RegisterFeatureGate(FeatureGate{Name: CommonInstancetypesDeploymentGate, State: GA})
RegisterFeatureGate(FeatureGate{Name: GPUGate, State: GA})
RegisterFeatureGate(FeatureGate{Name: HotplugNetworkIfacesGate, State: GA})
RegisterFeatureGate(FeatureGate{Name: BochsDisplayForEFIGuests, State: GA})
RegisterFeatureGate(FeatureGate{Name: VMLiveUpdateFeaturesGate, State: GA})
RegisterFeatureGate(FeatureGate{Name: NetworkBindingPlugingsGate, State: GA})
RegisterFeatureGate(FeatureGate{Name: DynamicPodInterfaceNamingGate, State: GA})
RegisterFeatureGate(FeatureGate{Name: VolumesUpdateStrategy, State: GA})
RegisterFeatureGate(FeatureGate{Name: VolumeMigration, State: GA})
RegisterFeatureGate(FeatureGate{Name: DisableCustomSELinuxPolicy, State: GA})
RegisterFeatureGate(FeatureGate{Name: AutoResourceLimitsGate, State: GA})
RegisterFeatureGate(FeatureGate{Name: ClusterProfiler, State: GA})
RegisterFeatureGate(FeatureGate{Name: VMPersistentState, State: GA})
	RegisterFeatureGate(FeatureGate{Name: DockerSELinuxMCSWorkaround, State: Deprecated,
		Message: "DockerSELinuxMCSWorkaround has been deprecated since v1.4."})
RegisterFeatureGate(FeatureGate{Name: VirtIOFSGate, State: Discontinued, Message: VirtioFsFeatureGateDiscontinueMessage})
RegisterFeatureGate(FeatureGate{Name: PasstGate, State: Discontinued, Message: PasstDiscontinueMessage, VmiSpecUsed: passtApiUsed})
RegisterFeatureGate(FeatureGate{Name: MacvtapGate, State: Discontinued, Message: MacvtapDiscontinueMessage, VmiSpecUsed: macvtapApiUsed})
RegisterFeatureGate(FeatureGate{Name: InstancetypeReferencePolicy, State: GA})
RegisterFeatureGate(FeatureGate{Name: MultiArchitecture, State: Deprecated, Message: "MultiArchitecture has been deprecated since v1.8.0"})
RegisterFeatureGate(FeatureGate{Name: VirtIOFSConfigVolumesGate, State: GA})
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package featuregate
import (
v1 "kubevirt.io/api/core/v1"
)
const MacvtapDiscontinueMessage = "Macvtap network binding is discontinued since v1.3. Please refer to Kubevirt user guide for alternatives."
func macvtapApiUsed(spec *v1.VirtualMachineInstanceSpec) bool {
for _, net := range spec.Domain.Devices.Interfaces {
if net.DeprecatedMacvtap != nil {
return true
}
}
return false
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package featuregate
import (
v1 "kubevirt.io/api/core/v1"
)
const PasstDiscontinueMessage = "Passt network binding is discontinued since v1.3. Please refer to Kubevirt user guide for alternatives."
func passtApiUsed(spec *v1.VirtualMachineInstanceSpec) bool {
for _, net := range spec.Domain.Devices.Interfaces {
if net.DeprecatedPasst != nil {
return true
}
}
return false
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package featuregate
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "kubevirt.io/api/core/v1"
)
func ValidateFeatureGates(featureGates []string, vmiSpec *v1.VirtualMachineInstanceSpec) []metav1.StatusCause {
var causes []metav1.StatusCause
for _, fgName := range featureGates {
fg := FeatureGateInfo(fgName)
if fg != nil && fg.State == Discontinued && fg.VmiSpecUsed != nil {
if used := fg.VmiSpecUsed(vmiSpec); used {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueNotSupported,
Message: fg.Message,
})
}
}
}
return causes
}
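// Illustrative sketch, not part of the original source: a spec that still
// uses the removed Passt binding, validated against the discontinued gate,
// yields a FieldValueNotSupported cause carrying the gate's message. The
// InterfaceBindingMethod/DeprecatedInterfacePasst names are assumed from
// kubevirt.io/api/core/v1.
func exampleValidateDiscontinuedPasst() []metav1.StatusCause {
	spec := &v1.VirtualMachineInstanceSpec{}
	spec.Domain.Devices.Interfaces = []v1.Interface{{
		Name: "default",
		InterfaceBindingMethod: v1.InterfaceBindingMethod{
			DeprecatedPasst: &v1.DeprecatedInterfacePasst{},
		},
	}}
	return ValidateFeatureGates([]string{PasstGate}, spec)
}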
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package virtconfig
/*
This module exposes the virtualization configuration that is available at the cluster level, along with its default settings.
*/
import (
"kubevirt.io/client-go/log"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/virt-config/featuregate"
)
const (
ParallelOutboundMigrationsPerNodeDefault uint32 = 2
ParallelMigrationsPerClusterDefault uint32 = 5
BandwidthPerMigrationDefault = "0Mi"
MigrationAllowAutoConverge bool = false
MigrationAllowPostCopy bool = false
MigrationProgressTimeout int64 = 150
MigrationCompletionTimeoutPerGiB int64 = 150
MigrationUtilityVolumesTimeoutSeconds int64 = 150
DefaultAMD64MachineType = "q35"
DefaultAARCH64MachineType = "virt"
DefaultS390XMachineType = "s390-ccw-virtio"
DefaultCPURequest = "100m"
DefaultMemoryOvercommit = 100
DefaultAMD64EmulatedMachines = "q35*,pc-q35*"
DefaultAARCH64EmulatedMachines = "virt*"
DefaultS390XEmulatedMachines = "s390-ccw-virtio*"
DefaultLessPVCSpaceToleration = 10
DefaultMinimumReservePVCBytes = 131072
DefaultNodeSelectors = ""
DefaultNetworkInterface = "bridge"
DefaultImagePullPolicy = k8sv1.PullIfNotPresent
DefaultAllowEmulation = false
DefaultUnsafeMigrationOverride = false
DefaultPermitSlirpInterface = false
SmbiosConfigDefaultFamily = "KubeVirt"
SmbiosConfigDefaultManufacturer = "KubeVirt"
SmbiosConfigDefaultProduct = "None"
DefaultPermitBridgeInterfaceOnPodNetwork = true
DefaultSELinuxLauncherType = ""
SupportedGuestAgentVersions = "2.*,3.*,4.*,5.*"
DefaultARCHOVMFPath = "/usr/share/OVMF"
DefaultAARCH64OVMFPath = "/usr/share/AAVMF"
DefaultS390xOVMFPath = ""
DefaultMemBalloonStatsPeriod uint32 = 10
DefaultCPUAllocationRatio = 10
DefaultDiskVerificationMemoryLimitBytes = 2000 * 1024 * 1024
DefaultVirtAPILogVerbosity = 2
DefaultVirtControllerLogVerbosity = 2
DefaultVirtHandlerLogVerbosity = 2
DefaultVirtLauncherLogVerbosity = 2
DefaultVirtOperatorLogVerbosity = 2
// Default REST configuration settings
DefaultVirtHandlerQPS float32 = 50
DefaultVirtHandlerBurst = 100
DefaultVirtControllerQPS float32 = 200
DefaultVirtControllerBurst = 400
DefaultVirtAPIQPS float32 = 200
DefaultVirtAPIBurst = 400
DefaultVirtWebhookClientQPS = 200
DefaultVirtWebhookClientBurst = 400
DefaultMaxHotplugRatio = 4
DefaultVMRolloutStrategy = v1.VMRolloutStrategyLiveUpdate
)
func IsARM64(arch string) bool {
return arch == "arm64"
}
func (c *ClusterConfig) GetMemBalloonStatsPeriod() uint32 {
return *c.GetConfig().MemBalloonStatsPeriod
}
func (c *ClusterConfig) AllowEmulation() bool {
return c.GetConfig().DeveloperConfiguration.UseEmulation
}
func (c *ClusterConfig) GetMigrationConfiguration() *v1.MigrationConfiguration {
migrationConfig := c.GetConfig().MigrationConfiguration
// For backward compatibility, AllowWorkloadDisruption will follow the
// value of AllowPostCopy, if not explicitly set
if migrationConfig.AllowWorkloadDisruption == nil {
allowPostCopy := false
if migrationConfig.AllowPostCopy != nil {
allowPostCopy = *migrationConfig.AllowPostCopy
}
migrationConfig.AllowWorkloadDisruption = &allowPostCopy
}
return migrationConfig
}
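// Illustrative sketch, not part of the original source: the backward
// compatibility rule above in isolation. With AllowWorkloadDisruption unset,
// the effective value follows AllowPostCopy (and defaults to false when that
// is unset too).
func exampleWorkloadDisruptionDefault(cfg *v1.MigrationConfiguration) bool {
	if cfg.AllowWorkloadDisruption != nil {
		return *cfg.AllowWorkloadDisruption
	}
	return cfg.AllowPostCopy != nil && *cfg.AllowPostCopy
}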
func (c *ClusterConfig) GetImagePullPolicy() (policy k8sv1.PullPolicy) {
return c.GetConfig().ImagePullPolicy
}
func (c *ClusterConfig) GetResourceVersion() string {
c.lock.Lock()
defer c.lock.Unlock()
return c.lastValidConfigResourceVersion
}
func (c *ClusterConfig) GetMachineType(arch string) string {
if c.GetConfig().MachineType != "" {
return c.GetConfig().MachineType
}
switch arch {
case "arm64":
return c.GetConfig().ArchitectureConfiguration.Arm64.MachineType
case "s390x":
return c.GetConfig().ArchitectureConfiguration.S390x.MachineType
default:
return c.GetConfig().ArchitectureConfiguration.Amd64.MachineType
}
}
func (c *ClusterConfig) GetCPUModel() string {
return c.GetConfig().CPUModel
}
func (c *ClusterConfig) GetCPURequest() *resource.Quantity {
return c.GetConfig().CPURequest
}
func (c *ClusterConfig) GetDiskVerification() *v1.DiskVerification {
return c.GetConfig().DeveloperConfiguration.DiskVerification
}
func (c *ClusterConfig) GetMemoryOvercommit() int {
return c.GetConfig().DeveloperConfiguration.MemoryOvercommit
}
func (c *ClusterConfig) GetEmulatedMachines(arch string) []string {
oldEmulatedMachines := c.GetConfig().EmulatedMachines
if oldEmulatedMachines != nil {
return oldEmulatedMachines
}
switch arch {
case "arm64":
return c.GetConfig().ArchitectureConfiguration.Arm64.EmulatedMachines
case "s390x":
return c.GetConfig().ArchitectureConfiguration.S390x.EmulatedMachines
default:
return c.GetConfig().ArchitectureConfiguration.Amd64.EmulatedMachines
}
}
func (c *ClusterConfig) GetLessPVCSpaceToleration() int {
return c.GetConfig().DeveloperConfiguration.LessPVCSpaceToleration
}
func (c *ClusterConfig) GetMinimumReservePVCBytes() uint64 {
return c.GetConfig().DeveloperConfiguration.MinimumReservePVCBytes
}
func (c *ClusterConfig) GetNodeSelectors() map[string]string {
return c.GetConfig().DeveloperConfiguration.NodeSelectors
}
func (c *ClusterConfig) GetDefaultNetworkInterface() string {
return c.GetConfig().NetworkConfiguration.NetworkInterface
}
func (c *ClusterConfig) GetDefaultArchitecture() string {
return c.GetConfig().ArchitectureConfiguration.DefaultArchitecture
}
func (c *ClusterConfig) GetSMBIOS() *v1.SMBiosConfiguration {
return c.GetConfig().SMBIOSConfig
}
func (c *ClusterConfig) IsBridgeInterfaceOnPodNetworkEnabled() bool {
return *c.GetConfig().NetworkConfiguration.PermitBridgeInterfaceOnPodNetwork
}
func (c *ClusterConfig) GetDefaultClusterConfig() *v1.KubeVirtConfiguration {
return c.defaultConfig
}
func (c *ClusterConfig) GetSELinuxLauncherType() string {
return c.GetConfig().SELinuxLauncherType
}
func (c *ClusterConfig) GetDefaultRuntimeClass() string {
return c.GetConfig().DefaultRuntimeClass
}
func (c *ClusterConfig) GetSupportedAgentVersions() []string {
return c.GetConfig().SupportedGuestAgentVersions
}
func (c *ClusterConfig) GetOVMFPath(arch string) string {
oldOvmfPath := c.GetConfig().OVMFPath
if oldOvmfPath != "" {
return oldOvmfPath
}
switch arch {
case "arm64":
return c.GetConfig().ArchitectureConfiguration.Arm64.OVMFPath
case "s390x":
return c.GetConfig().ArchitectureConfiguration.S390x.OVMFPath
default:
return c.GetConfig().ArchitectureConfiguration.Amd64.OVMFPath
}
}
func (c *ClusterConfig) GetCPUAllocationRatio() int {
return c.GetConfig().DeveloperConfiguration.CPUAllocationRatio
}
func (c *ClusterConfig) GetMinimumClusterTSCFrequency() *int64 {
return c.GetConfig().DeveloperConfiguration.MinimumClusterTSCFrequency
}
func (c *ClusterConfig) GetPermittedHostDevices() *v1.PermittedHostDevices {
return c.GetConfig().PermittedHostDevices
}
func (c *ClusterConfig) GetSupportContainerRequest(typeName v1.SupportContainerType, resourceName k8sv1.ResourceName) *resource.Quantity {
for _, containerResource := range c.GetConfig().SupportContainerResources {
if containerResource.Type == typeName {
quantity := containerResource.Resources.Requests[resourceName]
if !quantity.IsZero() {
return &quantity
}
}
}
return nil
}
func (c *ClusterConfig) GetSupportContainerLimit(typeName v1.SupportContainerType, resourceName k8sv1.ResourceName) *resource.Quantity {
for _, containerResource := range c.GetConfig().SupportContainerResources {
if containerResource.Type == typeName {
quantity := containerResource.Resources.Limits[resourceName]
if !quantity.IsZero() {
return &quantity
}
}
}
return nil
}
func canSelectNode(nodeSelector map[string]string, node *k8sv1.Node) bool {
for key, val := range nodeSelector {
labelValue, exist := node.Labels[key]
if !exist || val != labelValue {
return false
}
}
return true
}
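// Illustrative sketch, not part of the original source: canSelectNode
// requires every selector pair to match; a node carrying extra labels still
// matches, while a node missing any requested key does not. The labels are
// hypothetical.
func exampleCanSelectNode() bool {
	node := &k8sv1.Node{}
	node.Labels = map[string]string{"gpu": "true", "zone": "a"}
	return canSelectNode(map[string]string{"gpu": "true"}, node) // true; {"fpga": "true"} would not match
}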
func (c *ClusterConfig) GetDesiredMDEVTypes(node *k8sv1.Node) []string {
mdevTypesConf := c.GetConfig().MediatedDevicesConfiguration
if mdevTypesConf == nil {
return []string{}
}
nodeMdevConf := mdevTypesConf.NodeMediatedDeviceTypes
if nodeMdevConf != nil {
mdevTypesMap := make(map[string]struct{})
for _, nodeConfig := range nodeMdevConf {
if canSelectNode(nodeConfig.NodeSelector, node) {
types := nodeConfig.MediatedDeviceTypes
// Handle deprecated spelling
if len(types) == 0 {
types = nodeConfig.MediatedDevicesTypes
}
for _, mdevType := range types {
mdevTypesMap[mdevType] = struct{}{}
}
}
}
if len(mdevTypesMap) != 0 {
mdevTypesList := []string{}
for mdevType := range mdevTypesMap {
mdevTypesList = append(mdevTypesList, mdevType)
}
return mdevTypesList
}
}
// Handle deprecated spelling
if len(mdevTypesConf.MediatedDeviceTypes) == 0 {
return mdevTypesConf.MediatedDevicesTypes
}
return mdevTypesConf.MediatedDeviceTypes
}
type virtComponent int
const (
virtHandler virtComponent = iota
virtApi
virtController
virtOperator
virtLauncher
virtSynchronizationController
)
// getComponentVerbosity returns the verbosity of the given component. nodeName may be empty, in which case it is ignored.
func (c *ClusterConfig) getComponentVerbosity(component virtComponent, nodeName string) uint {
logConf := c.GetConfig().DeveloperConfiguration.LogVerbosity
if nodeName != "" {
if level := logConf.NodeVerbosity[nodeName]; level != 0 {
return level
}
}
switch component {
case virtHandler:
return logConf.VirtHandler
case virtApi:
return logConf.VirtAPI
case virtController:
return logConf.VirtController
case virtOperator:
return logConf.VirtOperator
case virtLauncher:
return logConf.VirtLauncher
case virtSynchronizationController:
return logConf.VirtSynchronizationController
default:
log.Log.Errorf("getComponentVerbosity called with an unknown virtComponent: %v", component)
return 0
}
}
func (c *ClusterConfig) GetVirtHandlerVerbosity(nodeName string) uint {
return c.getComponentVerbosity(virtHandler, nodeName)
}
func (c *ClusterConfig) GetVirtAPIVerbosity(nodeName string) uint {
return c.getComponentVerbosity(virtApi, nodeName)
}
func (c *ClusterConfig) GetVirtControllerVerbosity(nodeName string) uint {
return c.getComponentVerbosity(virtController, nodeName)
}
func (c *ClusterConfig) GetVirtOperatorVerbosity(nodeName string) uint {
return c.getComponentVerbosity(virtOperator, nodeName)
}
func (c *ClusterConfig) GetVirtLauncherVerbosity() uint {
return c.getComponentVerbosity(virtLauncher, "")
}
func (c *ClusterConfig) GetVirtSynchronizationControllerVerbosity() uint {
return c.getComponentVerbosity(virtSynchronizationController, "")
}
// GetObsoleteCPUModels returns the set of obsolete CPU models used by the node-labeller
func (c *ClusterConfig) GetObsoleteCPUModels() map[string]bool {
return c.GetConfig().ObsoleteCPUModels
}
// GetClusterCPUArch returns the CPU architecture in ClusterConfig
func (c *ClusterConfig) GetClusterCPUArch() string {
return c.cpuArch
}
// GetDeveloperConfigurationUseEmulation returns the UseEmulation value in DeveloperConfiguration
func (c *ClusterConfig) GetDeveloperConfigurationUseEmulation() bool {
config := c.GetConfig()
if config.DeveloperConfiguration != nil {
return config.DeveloperConfiguration.UseEmulation
}
return false
}
func (c *ClusterConfig) GetVMStateStorageClass() string {
return c.GetConfig().VMStateStorageClass
}
func (c *ClusterConfig) IsFreePageReportingDisabled() bool {
return c.GetConfig().VirtualMachineOptions != nil && c.GetConfig().VirtualMachineOptions.DisableFreePageReporting != nil
}
func (c *ClusterConfig) IsSerialConsoleLogDisabled() bool {
return c.GetConfig().VirtualMachineOptions != nil && c.GetConfig().VirtualMachineOptions.DisableSerialConsoleLog != nil
}
func (c *ClusterConfig) GetKSMConfiguration() *v1.KSMConfiguration {
return c.GetConfig().KSMConfiguration
}
func (c *ClusterConfig) GetMaximumCpuSockets() (numOfSockets uint32) {
liveConfig := c.GetConfig().LiveUpdateConfiguration
if liveConfig != nil && liveConfig.MaxCpuSockets != nil {
numOfSockets = *liveConfig.MaxCpuSockets
}
return
}
func (c *ClusterConfig) GetMaximumGuestMemory() *resource.Quantity {
liveConfig := c.GetConfig().LiveUpdateConfiguration
if liveConfig != nil {
return liveConfig.MaxGuest
}
return nil
}
func (c *ClusterConfig) GetMaxHotplugRatio() uint32 {
liveConfig := c.GetConfig().LiveUpdateConfiguration
if liveConfig == nil {
return 1
}
return liveConfig.MaxHotplugRatio
}
func (c *ClusterConfig) IsVMRolloutStrategyLiveUpdate() bool {
liveConfig := c.GetConfig().VMRolloutStrategy
return liveConfig == nil || *liveConfig == v1.VMRolloutStrategyLiveUpdate
}
func (c *ClusterConfig) GetNetworkBindings() map[string]v1.InterfaceBindingPlugin {
networkConfig := c.GetConfig().NetworkConfiguration
if networkConfig != nil {
return networkConfig.Binding
}
return nil
}
func (config *ClusterConfig) VGADisplayForEFIGuestsEnabled() bool {
VGADisplayForEFIGuestsAnnotationExists := false
kv := config.GetConfigFromKubeVirtCR()
if kv != nil {
_, VGADisplayForEFIGuestsAnnotationExists = kv.Annotations[v1.VGADisplayForEFIGuestsX86Annotation]
}
return VGADisplayForEFIGuestsAnnotationExists
}
func (c *ClusterConfig) GetInstancetypeReferencePolicy() v1.InstancetypeReferencePolicy {
instancetypeConfig := c.GetConfig().Instancetype
if instancetypeConfig != nil && instancetypeConfig.ReferencePolicy != nil {
return *instancetypeConfig.ReferencePolicy
}
// Default to the Reference InstancetypeReferencePolicy
return v1.Reference
}
func (c *ClusterConfig) ClusterProfilerEnabled() bool {
return c.GetConfig().DeveloperConfiguration.ClusterProfiler ||
c.isFeatureGateDefined(featuregate.ClusterProfiler)
}
package services
import (
"fmt"
"maps"
"strings"
k8sv1 "k8s.io/api/core/v1"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/virt-controller/watch/topology"
)
type NodeSelectorRenderer struct {
cpuFeatureLabels []string
cpuModelLabel string
machineTypeLabel string
hasDedicatedCPU bool
hyperv bool
podNodeSelectors map[string]string
tscFrequency *int64
vmiFeatures *v1.Features
realtimeEnabled bool
sevEnabled bool
sevESEnabled bool
SecureExecutionEnabled bool
sevSNPEnabled bool
tdxEnabled bool
}
type NodeSelectorRendererOption func(renderer *NodeSelectorRenderer)
func NewNodeSelectorRenderer(
vmiNodeSelectors map[string]string,
clusterWideConfNodeSelectors map[string]string,
architecture string,
opts ...NodeSelectorRendererOption,
) *NodeSelectorRenderer {
podNodeSelectors := map[string]string{v1.NodeSchedulable: "true"}
if architecture != "" {
podNodeSelectors[k8sv1.LabelArchStable] = strings.ToLower(architecture)
}
maps.Copy(podNodeSelectors, clusterWideConfNodeSelectors)
maps.Copy(podNodeSelectors, vmiNodeSelectors)
nodeSelectorRenderer := &NodeSelectorRenderer{podNodeSelectors: podNodeSelectors}
for _, opt := range opts {
opt(nodeSelectorRenderer)
}
return nodeSelectorRenderer
}
func (nsr *NodeSelectorRenderer) Render() map[string]string {
if nsr.hasDedicatedCPU {
nsr.enableSelectorLabel(v1.CPUManager)
}
if nsr.hyperv {
maps.Copy(nsr.podNodeSelectors, hypervNodeSelectors(nsr.vmiFeatures))
}
if nsr.cpuModelLabel != "" && nsr.cpuModelLabel != cpuModelLabel(v1.CPUModeHostModel) && nsr.cpuModelLabel != cpuModelLabel(v1.CPUModeHostPassthrough) {
nsr.enableSelectorLabel(nsr.cpuModelLabel)
}
if nsr.machineTypeLabel != "" {
nsr.enableSelectorLabel(nsr.machineTypeLabel)
}
for _, cpuFeatureLabel := range nsr.cpuFeatureLabels {
nsr.enableSelectorLabel(cpuFeatureLabel)
}
if nsr.isManualTSCFrequencyRequired() {
nsr.enableSelectorLabel(topology.ToTSCSchedulableLabel(*nsr.tscFrequency))
}
if nsr.realtimeEnabled {
nsr.enableSelectorLabel(v1.RealtimeLabel)
}
if nsr.sevEnabled {
nsr.enableSelectorLabel(v1.SEVLabel)
}
if nsr.sevESEnabled {
nsr.enableSelectorLabel(v1.SEVESLabel)
}
if nsr.sevSNPEnabled {
nsr.enableSelectorLabel(v1.SEVSNPLabel)
}
if nsr.SecureExecutionEnabled {
nsr.enableSelectorLabel(v1.SecureExecutionLabel)
}
if nsr.tdxEnabled {
nsr.enableSelectorLabel(v1.TDXLabel)
}
return nsr.podNodeSelectors
}
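// Illustrative sketch, not part of the original source: rendering selectors
// for an amd64 VMI with dedicated CPUs. VMI-level selectors win on key
// collisions because they are copied last; the label values are hypothetical.
func exampleRenderNodeSelectors() map[string]string {
	renderer := NewNodeSelectorRenderer(
		map[string]string{"accelerator": "gpu"},  // VMI-level selectors
		map[string]string{"accelerator": "none"}, // cluster-wide defaults
		"amd64",
		WithDedicatedCPU(),
	)
	// Includes v1.NodeSchedulable, the arch label, the CPUManager label,
	// and accelerator=gpu.
	return renderer.Render()
}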
func (nsr *NodeSelectorRenderer) enableSelectorLabel(label string) {
nsr.podNodeSelectors[label] = "true"
}
func (nsr *NodeSelectorRenderer) isManualTSCFrequencyRequired() bool {
return nsr.tscFrequency != nil
}
func WithRealtime() NodeSelectorRendererOption {
return func(renderer *NodeSelectorRenderer) {
renderer.realtimeEnabled = true
}
}
func WithSEVSelector() NodeSelectorRendererOption {
return func(renderer *NodeSelectorRenderer) {
renderer.sevEnabled = true
}
}
func WithSEVESSelector() NodeSelectorRendererOption {
return func(renderer *NodeSelectorRenderer) {
renderer.sevESEnabled = true
}
}
func WithSEVSNPSelector() NodeSelectorRendererOption {
return func(renderer *NodeSelectorRenderer) {
renderer.sevSNPEnabled = true
}
}
func WithSecureExecutionSelector() NodeSelectorRendererOption {
return func(renderer *NodeSelectorRenderer) {
renderer.SecureExecutionEnabled = true
}
}
func WithTDXSelector() NodeSelectorRendererOption {
return func(renderer *NodeSelectorRenderer) {
renderer.tdxEnabled = true
}
}
func WithDedicatedCPU() NodeSelectorRendererOption {
return func(renderer *NodeSelectorRenderer) {
renderer.hasDedicatedCPU = true
}
}
func WithHyperv(features *v1.Features) NodeSelectorRendererOption {
return func(renderer *NodeSelectorRenderer) {
renderer.hyperv = true
renderer.vmiFeatures = features
}
}
func WithModelAndFeatureLabels(modelLabel string, cpuFeatureLabels ...string) NodeSelectorRendererOption {
return func(renderer *NodeSelectorRenderer) {
renderer.cpuFeatureLabels = cpuFeatureLabels
renderer.cpuModelLabel = modelLabel
}
}
func WithMachineType(machineType string) NodeSelectorRendererOption {
return func(renderer *NodeSelectorRenderer) {
machineTypeLabelKey := v1.SupportedMachineTypeLabel + machineType
renderer.machineTypeLabel = machineTypeLabelKey
}
}
func WithTSCTimer(tscFrequency *int64) NodeSelectorRendererOption {
return func(renderer *NodeSelectorRenderer) {
renderer.tscFrequency = tscFrequency
}
}
func CPUModelLabelFromCPUModel(vmi *v1.VirtualMachineInstance) (label string, err error) {
if vmi.Spec.Domain.CPU == nil || vmi.Spec.Domain.CPU.Model == "" {
		err = fmt.Errorf("cannot create CPU Model label, vmi spec is missing CPU model")
return
}
label = cpuModelLabel(vmi.Spec.Domain.CPU.Model)
return
}
func cpuModelLabel(cpuModel string) string {
return v1.CPUModelLabel + cpuModel
}
func CPUFeatureLabelsFromCPUFeatures(vmi *v1.VirtualMachineInstance) []string {
var labels []string
if vmi.Spec.Domain.CPU != nil && vmi.Spec.Domain.CPU.Features != nil {
for _, feature := range vmi.Spec.Domain.CPU.Features {
if feature.Policy == "" || feature.Policy == "require" {
labels = append(labels, v1.CPUFeatureLabel+feature.Name)
}
}
}
return labels
}
func hypervNodeSelectors(vmiFeatures *v1.Features) map[string]string {
nodeSelectors := make(map[string]string)
if vmiFeatures == nil || vmiFeatures.Hyperv == nil {
return nodeSelectors
}
for _, hv := range makeHVFeatureLabelTable(vmiFeatures) {
if isFeatureStateEnabled(hv.Feature) {
nodeSelectors[v1.HypervLabel+hv.Label] = "true"
}
}
	if vmiFeatures.Hyperv.EVMCS != nil && (vmiFeatures.Hyperv.EVMCS.Enabled == nil || *vmiFeatures.Hyperv.EVMCS.Enabled) {
nodeSelectors[v1.CPUModelVendorLabel+IntelVendorName] = "true"
}
return nodeSelectors
}
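// Illustrative sketch, not part of the original source: a VMI requesting the
// hyperv frequencies enlightenment plus EVMCS is pinned both to nodes
// advertising frequencies support and to Intel CPUs. This assumes
// isFeatureStateEnabled treats a nil Enabled as on, mirroring the EVMCS
// check above.
func exampleHypervSelectors() map[string]string {
	features := &v1.Features{Hyperv: &v1.FeatureHyperv{
		Frequencies: &v1.FeatureState{},
		EVMCS:       &v1.FeatureState{},
	}}
	return hypervNodeSelectors(features)
}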
type hvFeatureLabel struct {
Feature *v1.FeatureState
Label string
}
// makeHVFeatureLabelTable creates the mapping table between the VMI hyperv state and the label names.
// The table needs pointers into the v1.FeatureHyperv struct, so it has to be generated and can't be a
// static var
func makeHVFeatureLabelTable(vmiFeatures *v1.Features) []hvFeatureLabel {
// The following HyperV features don't require support from the host kernel, according to inspection
// of the QEMU sources (4.0 - adb3321bfd)
// VAPIC, Relaxed, Spinlocks, VendorID
// VPIndex, SyNIC: depend on both MSR and capability
// IPI, TLBFlush: depend on KVM Capabilities
// Runtime, Reset, SyNICTimer, Frequencies, Reenlightenment: depend on KVM MSRs availability
// EVMCS: depends on KVM capability, but the only way to know that is enable it, QEMU doesn't do
// any check before that, so we leave it out
//
// see also https://schd.ws/hosted_files/devconfcz2019/cf/vkuznets_enlightening_kvm_devconf2019.pdf
// to learn about dependencies between enlightenments
hyperv := vmiFeatures.Hyperv // shortcut
syNICTimer := &v1.FeatureState{}
if hyperv.SyNICTimer != nil {
syNICTimer.Enabled = hyperv.SyNICTimer.Enabled
}
return []hvFeatureLabel{
{
Feature: hyperv.VPIndex,
Label: "vpindex",
},
{
Feature: hyperv.Runtime,
Label: "runtime",
},
{
Feature: hyperv.Reset,
Label: "reset",
},
{
// TODO: SyNIC depends on vp-index on QEMU level. We should enforce this constraint.
Feature: hyperv.SyNIC,
Label: "synic",
},
{
// TODO: SyNICTimer depends on SyNIC and Relaxed. We should enforce this constraint.
Feature: syNICTimer,
Label: "synictimer",
},
{
Feature: hyperv.Frequencies,
Label: "frequencies",
},
{
Feature: hyperv.Reenlightenment,
Label: "reenlightenment",
},
{
Feature: hyperv.TLBFlush,
Label: "tlbflush",
},
{
Feature: hyperv.IPI,
Label: "ipi",
},
}
}
package services
import (
"strconv"
"strings"
k8sv1 "k8s.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/pointer"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/util"
"kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
)
const (
cacheHomeEnvVarName = "XDG_CACHE_HOME"
configHomeEnvVarName = "XDG_CONFIG_HOME"
runtimeDirEnvVarName = "XDG_RUNTIME_DIR"
)
type ContainerSpecRenderer struct {
imgPullPolicy k8sv1.PullPolicy
launcherImg string
name string
userID int64
volumeDevices []k8sv1.VolumeDevice
volumeMounts []k8sv1.VolumeMount
sharedFilesystems []string
resources k8sv1.ResourceRequirements
liveninessProbe *k8sv1.Probe
readinessProbe *k8sv1.Probe
ports []k8sv1.ContainerPort
capabilities *k8sv1.Capabilities
args []string
extraEnvVars []k8sv1.EnvVar
}
type Option func(*ContainerSpecRenderer)
func NewContainerSpecRenderer(containerName string, launcherImg string, imgPullPolicy k8sv1.PullPolicy, opts ...Option) *ContainerSpecRenderer {
computeContainerSpec := &ContainerSpecRenderer{
imgPullPolicy: imgPullPolicy,
launcherImg: launcherImg,
name: containerName,
}
for _, opt := range opts {
opt(computeContainerSpec)
}
return computeContainerSpec
}
func (csr *ContainerSpecRenderer) Render(cmd []string) k8sv1.Container {
return k8sv1.Container{
Name: csr.name,
Image: csr.launcherImg,
ImagePullPolicy: csr.imgPullPolicy,
SecurityContext: securityContext(csr.userID, csr.capabilities),
Command: cmd,
VolumeDevices: csr.volumeDevices,
VolumeMounts: csr.volumeMounts,
Resources: csr.resources,
Ports: csr.ports,
Env: csr.envVars(),
LivenessProbe: csr.liveninessProbe,
ReadinessProbe: csr.readinessProbe,
Args: csr.args,
TerminationMessagePolicy: k8sv1.TerminationMessageFallbackToLogsOnError,
}
}
func (csr *ContainerSpecRenderer) envVars() []k8sv1.EnvVar {
var env []k8sv1.EnvVar
if csr.userID != 0 {
env = append(env, xdgEnvironmentVariables()...)
}
if len(csr.sharedFilesystems) != 0 {
env = append(env, k8sv1.EnvVar{
Name: ENV_VAR_SHARED_FILESYSTEM_PATHS,
Value: strings.Join(csr.sharedFilesystems, ":"),
})
}
env = append(env, csr.extraEnvVars...)
return env
}
func WithNonRoot(userID int64) Option {
return func(renderer *ContainerSpecRenderer) {
renderer.userID = userID
}
}
func WithCapabilities(vmi *v1.VirtualMachineInstance) Option {
return func(renderer *ContainerSpecRenderer) {
if renderer.capabilities == nil {
renderer.capabilities = &k8sv1.Capabilities{
Add: requiredCapabilities(vmi),
}
} else {
renderer.capabilities.Add = requiredCapabilities(vmi)
}
}
}
func WithDropALLCapabilities() Option {
return func(renderer *ContainerSpecRenderer) {
if renderer.capabilities == nil {
renderer.capabilities = &k8sv1.Capabilities{
Drop: []k8sv1.Capability{"ALL"},
}
} else {
renderer.capabilities.Drop = []k8sv1.Capability{"ALL"}
}
}
}
func WithNoCapabilities() Option {
return func(renderer *ContainerSpecRenderer) {
renderer.capabilities = &k8sv1.Capabilities{
Drop: []k8sv1.Capability{"ALL"},
}
}
}
func WithVolumeDevices(devices ...k8sv1.VolumeDevice) Option {
return func(renderer *ContainerSpecRenderer) {
renderer.volumeDevices = devices
}
}
func WithVolumeMounts(mounts ...k8sv1.VolumeMount) Option {
return func(renderer *ContainerSpecRenderer) {
renderer.volumeMounts = mounts
}
}
func WithSharedFilesystems(paths ...string) Option {
return func(renderer *ContainerSpecRenderer) {
renderer.sharedFilesystems = paths
}
}
func WithResourceRequirements(resources k8sv1.ResourceRequirements) Option {
return func(renderer *ContainerSpecRenderer) {
renderer.resources = resources
}
}
func WithPorts(vmi *v1.VirtualMachineInstance) Option {
return func(renderer *ContainerSpecRenderer) {
renderer.ports = containerPortsFromVMI(vmi)
}
}
func WithArgs(args []string) Option {
return func(renderer *ContainerSpecRenderer) {
renderer.args = args
}
}
func WithLivelinessProbe(vmi *v1.VirtualMachineInstance) Option {
return func(renderer *ContainerSpecRenderer) {
v1.SetDefaults_Probe(vmi.Spec.LivenessProbe)
renderer.liveninessProbe = copyProbe(vmi.Spec.LivenessProbe)
updateLivenessProbe(vmi, renderer.liveninessProbe)
}
}
func WithReadinessProbe(vmi *v1.VirtualMachineInstance) Option {
return func(renderer *ContainerSpecRenderer) {
v1.SetDefaults_Probe(vmi.Spec.ReadinessProbe)
renderer.readinessProbe = copyProbe(vmi.Spec.ReadinessProbe)
updateReadinessProbe(vmi, renderer.readinessProbe)
}
}
func WithExtraEnvVars(envVars []k8sv1.EnvVar) Option {
return func(renderer *ContainerSpecRenderer) {
renderer.extraEnvVars = append(renderer.extraEnvVars, envVars...)
}
}
func xdgEnvironmentVariables() []k8sv1.EnvVar {
const varRun = "/var/run"
return []k8sv1.EnvVar{
{
Name: cacheHomeEnvVarName,
Value: util.VirtPrivateDir,
},
{
Name: configHomeEnvVarName,
Value: util.VirtPrivateDir,
},
{
Name: runtimeDirEnvVarName,
Value: varRun,
},
}
}
func securityContext(userId int64, requiredCapabilities *k8sv1.Capabilities) *k8sv1.SecurityContext {
isNonRoot := userId != 0
context := &k8sv1.SecurityContext{
RunAsUser: &userId,
RunAsNonRoot: &isNonRoot,
Capabilities: requiredCapabilities,
}
if isNonRoot {
context.RunAsGroup = &userId
context.AllowPrivilegeEscalation = pointer.P(false)
}
return context
}
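// Illustrative sketch, not part of the original source: for a non-root user
// ID such as 107 (the qemu UID KubeVirt conventionally uses; illustrative
// here), the rendered context runs as that user and group and forbids
// privilege escalation.
func exampleNonRootSecurityContext() *k8sv1.SecurityContext {
	return securityContext(107, &k8sv1.Capabilities{Drop: []k8sv1.Capability{"ALL"}})
}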
func containerPortsFromVMI(vmi *v1.VirtualMachineInstance) []k8sv1.ContainerPort {
var ports []k8sv1.ContainerPort
for _, iface := range vmi.Spec.Domain.Devices.Interfaces {
if iface.Ports != nil {
for _, port := range iface.Ports {
if port.Protocol == "" {
port.Protocol = "TCP"
}
ports = append(ports, k8sv1.ContainerPort{Protocol: k8sv1.Protocol(port.Protocol), Name: port.Name, ContainerPort: port.Port})
}
}
}
return ports
}
func updateReadinessProbe(vmi *v1.VirtualMachineInstance, computeProbe *k8sv1.Probe) {
if vmi.Spec.ReadinessProbe.GuestAgentPing != nil {
wrapGuestAgentPingWithVirtProbe(vmi, computeProbe)
computeProbe.InitialDelaySeconds = computeProbe.InitialDelaySeconds + LibvirtStartupDelay
return
}
wrapExecProbeWithVirtProbe(vmi, computeProbe)
computeProbe.InitialDelaySeconds = computeProbe.InitialDelaySeconds + LibvirtStartupDelay
}
func updateLivenessProbe(vmi *v1.VirtualMachineInstance, computeProbe *k8sv1.Probe) {
if vmi.Spec.LivenessProbe.GuestAgentPing != nil {
wrapGuestAgentPingWithVirtProbe(vmi, computeProbe)
computeProbe.InitialDelaySeconds = computeProbe.InitialDelaySeconds + LibvirtStartupDelay
return
}
wrapExecProbeWithVirtProbe(vmi, computeProbe)
computeProbe.InitialDelaySeconds = computeProbe.InitialDelaySeconds + LibvirtStartupDelay
}
func wrapExecProbeWithVirtProbe(vmi *v1.VirtualMachineInstance, probe *k8sv1.Probe) {
if probe == nil || probe.ProbeHandler.Exec == nil {
return
}
originalCommand := probe.ProbeHandler.Exec.Command
if len(originalCommand) < 1 || originalCommand[0] == "virt-probe" {
return
}
wrappedCommand := []string{
"virt-probe",
"--domainName", api.VMINamespaceKeyFunc(vmi),
"--timeoutSeconds", strconv.FormatInt(int64(probe.TimeoutSeconds), 10),
"--command", originalCommand[0],
"--",
}
wrappedCommand = append(wrappedCommand, originalCommand[1:]...)
probe.ProbeHandler.Exec.Command = wrappedCommand
// we add 1s to the pod probe to compensate for the additional steps in probing
probe.TimeoutSeconds += 1
}
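// Illustrative sketch, not part of the original source: an exec probe
// running ["cat", "/tmp/ready"] with a 5s timeout is rewritten to
// ["virt-probe", "--domainName", <ns_name>, "--timeoutSeconds", "5",
// "--command", "cat", "--", "/tmp/ready"], and the pod-level timeout grows
// to 6s to absorb the extra hop.
func exampleWrapExecProbe(vmi *v1.VirtualMachineInstance) []string {
	probe := &k8sv1.Probe{
		ProbeHandler: k8sv1.ProbeHandler{
			Exec: &k8sv1.ExecAction{Command: []string{"cat", "/tmp/ready"}},
		},
		TimeoutSeconds: 5,
	}
	wrapExecProbeWithVirtProbe(vmi, probe)
	return probe.ProbeHandler.Exec.Command
}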
func requiredCapabilities(vmi *v1.VirtualMachineInstance) []k8sv1.Capability {
	// These capabilities are always required because we set them on the virt-launcher binary
capabilities := []k8sv1.Capability{CAP_NET_BIND_SERVICE}
if !util.IsNonRootVMI(vmi) {
// add a CAP_SYS_NICE capability to allow setting cpu affinity
capabilities = append(capabilities, CAP_SYS_NICE)
}
return capabilities
}
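// Illustrative sketch, not part of the original source: assembling a compute
// container for a non-root VMI. The container name, image, UID, and command
// are hypothetical placeholders.
func exampleComputeContainer(vmi *v1.VirtualMachineInstance) k8sv1.Container {
	renderer := NewContainerSpecRenderer(
		"compute", "quay.io/example/virt-launcher:latest", k8sv1.PullIfNotPresent,
		WithNonRoot(107),
		WithCapabilities(vmi),
		WithPorts(vmi),
	)
	return renderer.Render([]string{"/usr/bin/virt-launcher"})
}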
package services
import (
"fmt"
"strconv"
"strings"
"k8s.io/client-go/tools/cache"
"kubevirt.io/client-go/log"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/downwardmetrics"
netvmispec "kubevirt.io/kubevirt/pkg/network/vmispec"
"kubevirt.io/kubevirt/pkg/tpm"
"kubevirt.io/kubevirt/pkg/util"
"kubevirt.io/kubevirt/pkg/util/hardware"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
type ResourceRendererOption func(renderer *ResourceRenderer)
type ResourceRenderer struct {
vmLimits k8sv1.ResourceList
vmRequests k8sv1.ResourceList
calculatedLimits k8sv1.ResourceList
calculatedRequests k8sv1.ResourceList
resourceClaims []k8sv1.ResourceClaim
}
type resourcePredicate func(*v1.VirtualMachineInstance) bool
type VMIResourcePredicates struct {
resourceRules []VMIResourceRule
vmi *v1.VirtualMachineInstance
}
type VMIResourceRule struct {
predicate resourcePredicate
option ResourceRendererOption
}
func not(p resourcePredicate) resourcePredicate {
return func(vmi *v1.VirtualMachineInstance) bool {
return !p(vmi)
}
}
func NewVMIResourceRule(p resourcePredicate, option ResourceRendererOption) VMIResourceRule {
return VMIResourceRule{predicate: p, option: option}
}
func doesVMIRequireDedicatedCPU(vmi *v1.VirtualMachineInstance) bool {
return vmi.IsCPUDedicated()
}
func NewResourceRenderer(vmLimits k8sv1.ResourceList, vmRequests k8sv1.ResourceList, options ...ResourceRendererOption) *ResourceRenderer {
limits := map[k8sv1.ResourceName]resource.Quantity{}
requests := map[k8sv1.ResourceName]resource.Quantity{}
copyResources(vmLimits, limits)
copyResources(vmRequests, requests)
resourceRenderer := &ResourceRenderer{
vmLimits: limits,
vmRequests: requests,
calculatedLimits: map[k8sv1.ResourceName]resource.Quantity{},
calculatedRequests: map[k8sv1.ResourceName]resource.Quantity{},
resourceClaims: []k8sv1.ResourceClaim{},
}
for _, opt := range options {
opt(resourceRenderer)
}
return resourceRenderer
}
func (rr *ResourceRenderer) Limits() k8sv1.ResourceList {
podLimits := map[k8sv1.ResourceName]resource.Quantity{}
copyResources(rr.calculatedLimits, podLimits)
copyResources(rr.vmLimits, podLimits)
return podLimits
}
func (rr *ResourceRenderer) Requests() k8sv1.ResourceList {
podRequests := map[k8sv1.ResourceName]resource.Quantity{}
copyResources(rr.calculatedRequests, podRequests)
copyResources(rr.vmRequests, podRequests)
return podRequests
}
func (rr *ResourceRenderer) Claims() []k8sv1.ResourceClaim {
return rr.resourceClaims
}
func (rr *ResourceRenderer) ResourceRequirements() k8sv1.ResourceRequirements {
return k8sv1.ResourceRequirements{
Limits: rr.Limits(),
Requests: rr.Requests(),
Claims: rr.Claims(),
}
}
func WithEphemeralStorageRequest() ResourceRendererOption {
return func(renderer *ResourceRenderer) {
// Add ephemeral storage request to container to be used by Kubevirt. This amount of ephemeral storage
// should be added to the user's request.
ephemeralStorageOverhead := resource.MustParse(ephemeralStorageOverheadSize)
ephemeralStorageRequested := renderer.vmRequests[k8sv1.ResourceEphemeralStorage]
ephemeralStorageRequested.Add(ephemeralStorageOverhead)
renderer.vmRequests[k8sv1.ResourceEphemeralStorage] = ephemeralStorageRequested
if ephemeralStorageLimit, ephemeralStorageLimitDefined := renderer.vmLimits[k8sv1.ResourceEphemeralStorage]; ephemeralStorageLimitDefined {
ephemeralStorageLimit.Add(ephemeralStorageOverhead)
renderer.vmLimits[k8sv1.ResourceEphemeralStorage] = ephemeralStorageLimit
}
}
}
// Helper function to extract IO thread CPU count from VMI
func getIOThreadsCount(vmi *v1.VirtualMachineInstance) int64 {
if vmi == nil || vmi.Spec.Domain.IOThreads == nil ||
vmi.Spec.Domain.IOThreads.SupplementalPoolThreadCount == nil {
return 0
}
return int64(*vmi.Spec.Domain.IOThreads.SupplementalPoolThreadCount)
}
func WithoutDedicatedCPU(vmi *v1.VirtualMachineInstance, cpuAllocationRatio int, withCPULimits bool) ResourceRendererOption {
return func(renderer *ResourceRenderer) {
cpu := vmi.Spec.Domain.CPU
vcpus := calcVCPUs(cpu)
ioThreadCPUs := getIOThreadsCount(vmi) // Get IO thread count
totalCPUs := vcpus + ioThreadCPUs // Include IO threads
if totalCPUs != 0 && cpuAllocationRatio > 0 {
val := float64(totalCPUs) / float64(cpuAllocationRatio)
vcpusStr := fmt.Sprintf("%g", val)
if val < 1 {
val *= 1000
vcpusStr = fmt.Sprintf("%gm", val)
}
renderer.calculatedRequests[k8sv1.ResourceCPU] = resource.MustParse(vcpusStr)
if withCPULimits {
renderer.calculatedLimits[k8sv1.ResourceCPU] = resource.MustParse(strconv.FormatInt(totalCPUs, 10))
}
}
}
}
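// Illustrative sketch, not part of the original source: the allocation-ratio
// arithmetic above in isolation. 4 vCPUs with a ratio of 10 request "400m"
// of CPU; 20 vCPUs with the same ratio request "2".
func exampleCPURequestForRatio(totalCPUs int64, ratio int) string {
	val := float64(totalCPUs) / float64(ratio)
	if val < 1 {
		return fmt.Sprintf("%gm", val*1000)
	}
	return fmt.Sprintf("%g", val)
}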
func WithGPUsDevicePlugins(gpus []v1.GPU) ResourceRendererOption {
return func(r *ResourceRenderer) {
res := r.ResourceRequirements()
for _, g := range gpus {
if g.DeviceName != "" && g.ClaimRequest == nil {
requestResource(&res, g.DeviceName)
}
}
copyResources(res.Limits, r.calculatedLimits)
copyResources(res.Requests, r.calculatedRequests)
}
}
func WithGPUsDRA(gpus []v1.GPU) ResourceRendererOption {
return func(r *ResourceRenderer) {
res := r.ResourceRequirements()
for _, g := range gpus {
if g.DeviceName == "" && g.ClaimRequest != nil {
requestResourceClaims(&res, &k8sv1.ResourceClaim{
Name: *g.ClaimRequest.ClaimName,
Request: *g.ClaimRequest.RequestName,
})
}
}
copyResources(res.Limits, r.calculatedLimits)
copyResources(res.Requests, r.calculatedRequests)
copyResourceClaims(&res, &r.resourceClaims)
}
}
// WithHostDevicesDevicePlugins adds resource requests/limits only for HostDevices managed by device plugins.
func WithHostDevicesDevicePlugins(hostDevices []v1.HostDevice) ResourceRendererOption {
return func(r *ResourceRenderer) {
resources := r.ResourceRequirements()
for _, hd := range hostDevices {
if hd.DeviceName != "" && hd.ClaimRequest == nil {
requestResource(&resources, hd.DeviceName)
}
}
copyResources(resources.Limits, r.calculatedLimits)
copyResources(resources.Requests, r.calculatedRequests)
}
}
// WithHostDevicesDRA adds ResourceClaims for HostDevices provisioned via DRA.
func WithHostDevicesDRA(hostDevices []v1.HostDevice) ResourceRendererOption {
return func(r *ResourceRenderer) {
resources := r.ResourceRequirements()
for _, hd := range hostDevices {
if hd.DeviceName == "" && hd.ClaimRequest != nil && hd.ClaimRequest.ClaimName != nil && hd.ClaimRequest.RequestName != nil {
requestResourceClaims(&resources, &k8sv1.ResourceClaim{
Name: *hd.ClaimRequest.ClaimName,
Request: *hd.ClaimRequest.RequestName,
})
}
}
copyResources(resources.Limits, r.calculatedLimits)
copyResources(resources.Requests, r.calculatedRequests)
copyResourceClaims(&resources, &r.resourceClaims)
}
}
func WithHugePages(vmMemory *v1.Memory, memoryOverhead resource.Quantity) ResourceRendererOption {
return func(renderer *ResourceRenderer) {
hugepageType := k8sv1.ResourceName(k8sv1.ResourceHugePagesPrefix + vmMemory.Hugepages.PageSize)
hugepagesMemReq := renderer.vmRequests.Memory()
		// If the guest memory is explicitly set and smaller than the request, allocate hugepages for the guest memory only
if vmMemory != nil && vmMemory.Guest != nil {
requests := hugepagesMemReq.Value()
guest := vmMemory.Guest.Value()
if requests > guest {
hugepagesMemReq = vmMemory.Guest
}
}
renderer.calculatedRequests[hugepageType] = *hugepagesMemReq
renderer.calculatedLimits[hugepageType] = *hugepagesMemReq
reqMemDiff := resource.NewScaledQuantity(0, resource.Kilo)
limMemDiff := resource.NewScaledQuantity(0, resource.Kilo)
// In case the guest memory and the requested memory are different, add the difference
// to the overhead
if vmMemory != nil && vmMemory.Guest != nil {
requests := renderer.vmRequests.Memory().Value()
limits := renderer.vmLimits.Memory().Value()
guest := vmMemory.Guest.Value()
if requests > guest {
reqMemDiff.Add(*renderer.vmRequests.Memory())
reqMemDiff.Sub(*vmMemory.Guest)
}
if limits > guest {
limMemDiff.Add(*renderer.vmLimits.Memory())
limMemDiff.Sub(*vmMemory.Guest)
}
}
		// Set the requested memory equal to the overhead memory (plus any request/guest difference computed above)
reqMemDiff.Add(memoryOverhead)
renderer.vmRequests[k8sv1.ResourceMemory] = *reqMemDiff
if _, ok := renderer.vmLimits[k8sv1.ResourceMemory]; ok {
limMemDiff.Add(memoryOverhead)
renderer.vmLimits[k8sv1.ResourceMemory] = *limMemDiff
}
}
}
func WithMemoryRequests(vmiSpecMemory *v1.Memory, overcommit int) ResourceRendererOption {
return func(renderer *ResourceRenderer) {
limit, hasLimit := renderer.vmLimits[k8sv1.ResourceMemory]
request, hasRequest := renderer.vmRequests[k8sv1.ResourceMemory]
if hasLimit && !limit.IsZero() && (!hasRequest || request.IsZero()) {
renderer.vmRequests[k8sv1.ResourceMemory] = limit
}
if _, exists := renderer.vmRequests[k8sv1.ResourceMemory]; exists {
return
}
var memory *resource.Quantity
if vmiSpecMemory != nil && vmiSpecMemory.Guest != nil {
memory = vmiSpecMemory.Guest
} else if vmiSpecMemory != nil && vmiSpecMemory.Hugepages != nil {
if hugepagesSize, err := resource.ParseQuantity(vmiSpecMemory.Hugepages.PageSize); err == nil {
memory = &hugepagesSize
}
}
if memory != nil && memory.Value() > 0 {
if overcommit == 100 {
renderer.vmRequests[k8sv1.ResourceMemory] = *memory
} else {
value := (memory.Value() * int64(100)) / int64(overcommit)
renderer.vmRequests[k8sv1.ResourceMemory] = *resource.NewQuantity(value, memory.Format)
}
}
}
}
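// Illustrative sketch, not part of the original source: the overcommit rule
// above in isolation. A 2048Mi guest with overcommit=200 yields a 1024Mi pod
// request; overcommit=100 passes the value through unchanged.
func exampleOvercommittedRequest(guest resource.Quantity, overcommit int) resource.Quantity {
	if overcommit == 100 {
		return guest
	}
	return *resource.NewQuantity(guest.Value()*100/int64(overcommit), guest.Format)
}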
func WithMemoryOverhead(guestResourceSpec v1.ResourceRequirements, memoryOverhead resource.Quantity) ResourceRendererOption {
return func(renderer *ResourceRenderer) {
memoryRequest := renderer.vmRequests[k8sv1.ResourceMemory]
if !guestResourceSpec.OvercommitGuestOverhead {
memoryRequest.Add(memoryOverhead)
}
renderer.vmRequests[k8sv1.ResourceMemory] = memoryRequest
if memoryLimit, ok := renderer.vmLimits[k8sv1.ResourceMemory]; ok {
memoryLimit.Add(memoryOverhead)
renderer.vmLimits[k8sv1.ResourceMemory] = memoryLimit
}
}
}
func WithAutoMemoryLimits(namespace string, namespaceStore cache.Store) ResourceRendererOption {
return func(renderer *ResourceRenderer) {
requestRatio := getMemoryLimitsRatio(namespace, namespaceStore)
memoryRequest := renderer.vmRequests[k8sv1.ResourceMemory]
value := int64(float64(memoryRequest.Value()) * requestRatio)
renderer.calculatedLimits[k8sv1.ResourceMemory] = *resource.NewQuantity(value, memoryRequest.Format)
}
}
func WithCPUPinning(vmi *v1.VirtualMachineInstance, annotations map[string]string, additionalCPUs uint32) ResourceRendererOption {
return func(renderer *ResourceRenderer) {
cpu := vmi.Spec.Domain.CPU
vcpus := hardware.GetNumberOfVCPUs(cpu)
ioThreadCPUs := getIOThreadsCount(vmi)
if vcpus != 0 {
totalCPUs := vcpus + ioThreadCPUs
renderer.vmLimits[k8sv1.ResourceCPU] = *resource.NewQuantity(totalCPUs, resource.BinarySI)
renderer.vmRequests[k8sv1.ResourceCPU] = *resource.NewQuantity(totalCPUs, resource.BinarySI) // Ensure requests match limits for dedicated CPUs
} else {
ioThreadsCount := resource.NewQuantity(ioThreadCPUs, resource.BinarySI)
if cpuLimit, ok := renderer.vmLimits[k8sv1.ResourceCPU]; ok {
cpuLimit.Add(*ioThreadsCount)
renderer.vmLimits[k8sv1.ResourceCPU] = cpuLimit
}
if cpuRequest, ok := renderer.vmRequests[k8sv1.ResourceCPU]; ok {
cpuRequest.Add(*ioThreadsCount)
renderer.vmRequests[k8sv1.ResourceCPU] = cpuRequest
}
}
if cpu.IsolateEmulatorThread {
emulatorThreadCPUs := resource.NewQuantity(1, resource.BinarySI)
limits := renderer.vmLimits[k8sv1.ResourceCPU]
_, emulatorThreadCompleteToEvenParityAnnotationExists := annotations[v1.EmulatorThreadCompleteToEvenParity]
if emulatorThreadCompleteToEvenParityAnnotationExists &&
(limits.Value()+int64(additionalCPUs))%2 == 0 {
emulatorThreadCPUs = resource.NewQuantity(2, resource.BinarySI)
}
limits.Add(*emulatorThreadCPUs)
renderer.vmLimits[k8sv1.ResourceCPU] = limits
if cpuRequest, ok := renderer.vmRequests[k8sv1.ResourceCPU]; ok {
cpuRequest.Add(*emulatorThreadCPUs)
renderer.vmRequests[k8sv1.ResourceCPU] = cpuRequest
}
}
// Align memory limits with requests for consistency
if memRequest, ok := renderer.vmRequests[k8sv1.ResourceMemory]; ok {
renderer.vmLimits[k8sv1.ResourceMemory] = memRequest
}
}
}
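// Worked example for the emulator thread sizing above (illustrative): with 4
// dedicated vCPUs, no IO threads, additionalCPUs == 0 and the
// EmulatorThreadCompleteToEvenParity annotation set, the CPU limit starts at
// 4; 4 + 0 is even, so 2 CPUs are added for the emulator thread, yielding 6.
// Without the annotation a single CPU is added, yielding 5.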
func WithNetworkResources(networkToResourceMap map[string]string) ResourceRendererOption {
return func(renderer *ResourceRenderer) {
resources := renderer.ResourceRequirements()
for _, resourceName := range networkToResourceMap {
if resourceName != "" {
requestResource(&resources, resourceName)
}
}
copyResources(resources.Limits, renderer.calculatedLimits)
copyResources(resources.Requests, renderer.calculatedRequests)
}
}
func WithSEV() ResourceRendererOption {
return func(renderer *ResourceRenderer) {
resources := renderer.ResourceRequirements()
requestResource(&resources, SevDevice)
copyResources(resources.Limits, renderer.calculatedLimits)
copyResources(resources.Requests, renderer.calculatedRequests)
}
}
func WithPersistentReservation() ResourceRendererOption {
return func(renderer *ResourceRenderer) {
resources := renderer.ResourceRequirements()
requestResource(&resources, PrDevice)
copyResources(resources.Limits, renderer.calculatedLimits)
copyResources(resources.Requests, renderer.calculatedRequests)
}
}
func copyResources(srcResources, dstResources k8sv1.ResourceList) {
for key, value := range srcResources {
dstResources[key] = value
}
}
func requestResourceClaims(resources *k8sv1.ResourceRequirements, claim *k8sv1.ResourceClaim) {
if resources.Claims == nil {
resources.Claims = []k8sv1.ResourceClaim{*claim}
return
}
resources.Claims = append(resources.Claims, *claim)
}
func copyResourceClaims(resources *k8sv1.ResourceRequirements, claims *[]k8sv1.ResourceClaim) {
existing := make(map[string]struct{})
for _, c := range *claims {
existing[c.Name] = struct{}{}
}
for _, value := range resources.Claims {
if _, found := existing[value.Name]; found {
continue // skip duplicates by Name
}
*claims = append(*claims, value)
existing[value.Name] = struct{}{}
}
}
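// For illustration: if resources.Claims contains {Name: "gpu-claim"} and
// *claims already holds an entry with that Name, the entry is skipped, so
// repeated renderings do not accumulate duplicate claim references.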
// GetMemoryOverhead computes the estimation of total
// memory needed for the domain to operate properly.
// This includes the memory needed for the guest and memory
// for Qemu and OS overhead.
// The return value is the overhead memory quantity.
//
// Note: this is the best estimation we were able to come up with,
// and it is still not 100% accurate.
func GetMemoryOverhead(vmi *v1.VirtualMachineInstance, cpuArch string, additionalOverheadRatio *string) resource.Quantity {
domain := vmi.Spec.Domain
vmiMemoryReq := domain.Resources.Requests.Memory()
overhead := *resource.NewScaledQuantity(0, resource.Kilo)
// Add the memory needed for pagetables (one byte for every 512B of RAM)
pagetableMemory := resource.NewScaledQuantity(vmiMemoryReq.ScaledValue(resource.Kilo), resource.Kilo)
pagetableMemory.Set(pagetableMemory.Value() / 512)
overhead.Add(*pagetableMemory)
// Add fixed overhead for KubeVirt components, as seen in a random run, rounded up to the nearest MiB
// Note: shared libraries are included in the size, so every library is counted (wrongly) as many times as there are
// processes using it. However, the extra memory is only in the order of 10MiB and makes for a nice safety margin.
overhead.Add(resource.MustParse(VirtLauncherMonitorOverhead))
overhead.Add(resource.MustParse(VirtLauncherOverhead))
overhead.Add(resource.MustParse(VirtlogdOverhead))
overhead.Add(resource.MustParse(VirtqemudOverhead))
overhead.Add(resource.MustParse(QemuOverhead))
// Add CPU table overhead (8 MiB per vCPU and 8 MiB per IO thread)
// overhead per vcpu in MiB
coresMemory := resource.MustParse("8Mi")
var vcpus int64
if domain.CPU != nil {
vcpus = hardware.GetNumberOfVCPUs(domain.CPU)
} else {
// Currently, a default guest CPU topology is set by the API webhook mutator if not set by the user.
// However, this wasn't always the case.
// When the guest topology isn't set, take the value from the resource requests or limits.
resources := vmi.Spec.Domain.Resources
if cpuLimit, ok := resources.Limits[k8sv1.ResourceCPU]; ok {
vcpus = cpuLimit.Value()
} else if cpuRequests, ok := resources.Requests[k8sv1.ResourceCPU]; ok {
vcpus = cpuRequests.Value()
}
}
// If neither a CPU topology nor CPU requests/limits are provided, default vcpus to 1.
if vcpus < 1 {
vcpus = 1
}
value := coresMemory.Value() * vcpus
coresMemory = *resource.NewQuantity(value, coresMemory.Format)
overhead.Add(coresMemory)
// static overhead for IOThread
overhead.Add(resource.MustParse("8Mi"))
// Add video RAM overhead
if domain.Devices.AutoattachGraphicsDevice == nil || *domain.Devices.AutoattachGraphicsDevice {
overhead.Add(resource.MustParse("32Mi"))
}
// When using UEFI boot on aarch64 with the edk2 package, QEMU creates 2 pflash devices (64Mi each, 128Mi in total),
// which should be accounted for in the memory overhead.
// Additional information can be found here: https://github.com/qemu/qemu/blob/master/hw/arm/virt.c#L120
if cpuArch == "arm64" {
overhead.Add(resource.MustParse("128Mi"))
}
// Additional overhead of 1G for VFIO devices. VFIO requires all guest RAM to be locked
// in addition to MMIO memory space to allow DMA. 1G is often the size of reserved MMIO space on x86 systems.
// Additional information can be found here: https://www.redhat.com/archives/libvir-list/2015-November/msg00329.html
if util.IsVFIOVMI(vmi) {
overhead.Add(resource.MustParse("1Gi"))
}
// DownwardMetrics volumes use emptyDirs backed by memory.
// The max disk size is only 256Ki.
if downwardmetrics.HasDownwardMetricDisk(vmi) {
overhead.Add(resource.MustParse("1Mi"))
}
addProbeOverheads(vmi, &overhead)
// Consider memory overhead for SEV guests.
// Additional information can be found here: https://libvirt.org/kbase/launch_security_sev.html#memory
if util.IsSEVVMI(vmi) || util.IsSEVSNPVMI(vmi) || util.IsSEVESVMI(vmi) {
overhead.Add(resource.MustParse("256Mi"))
}
// Having a TPM device will spawn a swtpm process
// In `ps`, swtpm has VSZ of 53808 and RSS of 3496, so 53Mi should do
if tpm.HasDevice(&vmi.Spec) {
overhead.Add(resource.MustParse("53Mi"))
}
if vmi.IsCPUDedicated() || vmi.WantsToHaveQOSGuaranteed() {
overhead.Add(resource.MustParse("100Mi"))
}
// Multiplying by the ratio is expected to be the last calculation before returning the overhead.
if additionalOverheadRatio != nil && *additionalOverheadRatio != "" {
ratio, err := strconv.ParseFloat(*additionalOverheadRatio, 64)
if err != nil {
// This error should never happen as it's already validated by webhooks
log.Log.Warningf("cannot add additional overhead to virt infra overhead calculation: %v", err)
return overhead
}
overhead = multiplyMemory(overhead, ratio)
}
return overhead
}
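// Worked example (illustrative, not normative): for a VMI requesting 4Gi of
// memory with 2 vCPUs, an auto-attached graphics device, and no VFIO, SEV,
// TPM, or additional overhead ratio, the estimate is roughly:
//
//	pagetables:     4Gi / 512                          ≈ 8Mi
//	fixed overhead: 25Mi + 100Mi + 25Mi + 40Mi + 30Mi  = 220Mi
//	CPU tables:     8Mi * 2 vCPUs                      = 16Mi
//	IO thread:      8Mi (static)
//	video RAM:      32Mi
//	total:                                             ≈ 284Mi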
// Request a resource by name. This function bumps the resource count in
// both the limits and requests attributes.
//
// If we were operating on a regular resource (CPU, memory, network
// bandwidth), we would need to take care of QoS. For example,
// https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed
// explains that when Limits are set but Requests are not, the scheduler
// assumes that Requests equal Limits for a particular resource.
//
// But this function is not called for these standard resources; it is called
// for resources managed by device plugins. The device plugin design document
// says the following on the matter:
// https://github.com/kubernetes/community/blob/master/contributors/design-proposals/resource-management/device-plugin.md#end-user-story
//
// ```
// Devices can be selected using the same process as for OIRs in the pod spec.
// Devices have no impact on QOS. However, for the alpha, we expect the request
// to have limits == requests.
// ```
//
// This suggests that, for resources managed by device plugins, 1) limits
// should be equal to requests; and 2) QoS rules do not apply.
//
// Hence we don't copy the Limits value to Requests if the latter is missing.
func requestResource(resources *k8sv1.ResourceRequirements, resourceName string) {
name := k8sv1.ResourceName(resourceName)
bumpResources(resources.Limits, name)
bumpResources(resources.Requests, name)
}
func bumpResources(resources k8sv1.ResourceList, name k8sv1.ResourceName) {
unitQuantity := *resource.NewQuantity(1, resource.DecimalSI)
val, ok := resources[name]
if ok {
val.Add(unitQuantity)
resources[name] = val
} else {
resources[name] = unitQuantity
}
}
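// For illustration: starting from empty Limits and Requests, calling
// requestResource(&resources, SevDevice) twice leaves both the limit and the
// request for "devices.kubevirt.io/sev" at "2", because bumpResources
// increments an existing quantity by one unit instead of overwriting it.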
func calcVCPUs(cpu *v1.CPU) int64 {
if cpu != nil {
return hardware.GetNumberOfVCPUs(cpu)
}
return 1
}
func getRequiredResources(vmi *v1.VirtualMachineInstance, allowEmulation bool) k8sv1.ResourceList {
res := k8sv1.ResourceList{}
if netvmispec.RequiresTunDevice(vmi) {
res[TunDevice] = resource.MustParse("1")
}
if netvmispec.RequiresVirtioNetDevice(vmi, allowEmulation) {
// Note that for network interfaces, allowEmulation makes no difference
// to the eventual domain XML; it only makes /dev/vhost-net unavailable,
// causing libvirt to implicitly fall back to QEMU userland NIC emulation.
res[VhostNetDevice] = resource.MustParse("1")
}
if !allowEmulation {
res[KvmDevice] = resource.MustParse("1")
}
if util.IsAutoAttachVSOCK(vmi) {
res[VhostVsockDevice] = resource.MustParse("1")
}
return res
}
func WithVirtualizationResources(virtResources k8sv1.ResourceList) ResourceRendererOption {
return func(renderer *ResourceRenderer) {
copyResources(virtResources, renderer.vmLimits)
}
}
func validatePermittedHostDevices(spec *v1.VirtualMachineInstanceSpec, config *virtconfig.ClusterConfig) error {
errors := make([]string, 0)
if hostDevs := config.GetPermittedHostDevices(); hostDevs != nil {
// build a map of all permitted host devices
supportedHostDevicesMap := make(map[string]bool)
for _, dev := range hostDevs.PciHostDevices {
supportedHostDevicesMap[dev.ResourceName] = true
}
for _, dev := range hostDevs.MediatedDevices {
supportedHostDevicesMap[dev.ResourceName] = true
}
for _, dev := range hostDevs.USB {
supportedHostDevicesMap[dev.ResourceName] = true
}
// TODO @alayp: add proper validation for DRA GPUs in beta
if !config.GPUsWithDRAGateEnabled() {
for _, hostDev := range spec.Domain.Devices.GPUs {
if _, exist := supportedHostDevicesMap[hostDev.DeviceName]; !exist {
errors = append(errors, fmt.Sprintf("GPU %s is not permitted in permittedHostDevices configuration", hostDev.DeviceName))
}
}
}
for _, hostDev := range spec.Domain.Devices.HostDevices {
if _, exist := supportedHostDevicesMap[hostDev.DeviceName]; !exist {
errors = append(errors, fmt.Sprintf("HostDevice %s is not permitted in permittedHostDevices configuration", hostDev.DeviceName))
}
}
}
if len(errors) != 0 {
return fmt.Errorf("%s", strings.Join(errors, " "))
}
return nil
}
func sidecarResources(vmi *v1.VirtualMachineInstance, config *virtconfig.ClusterConfig) k8sv1.ResourceRequirements {
resources := k8sv1.ResourceRequirements{
Requests: k8sv1.ResourceList{},
Limits: k8sv1.ResourceList{},
}
if reqCpu := config.GetSupportContainerRequest(v1.SideCar, k8sv1.ResourceCPU); reqCpu != nil {
resources.Requests[k8sv1.ResourceCPU] = *reqCpu
}
if reqMem := config.GetSupportContainerRequest(v1.SideCar, k8sv1.ResourceMemory); reqMem != nil {
resources.Requests[k8sv1.ResourceMemory] = *reqMem
}
// add default cpu and memory limits to enable cpu pinning if requested
// TODO(vladikr): make the hookSidecar express resources
if vmi.IsCPUDedicated() || vmi.WantsToHaveQOSGuaranteed() {
resources.Limits[k8sv1.ResourceCPU] = resource.MustParse("200m")
if limCpu := config.GetSupportContainerLimit(v1.SideCar, k8sv1.ResourceCPU); limCpu != nil {
resources.Limits[k8sv1.ResourceCPU] = *limCpu
}
resources.Limits[k8sv1.ResourceMemory] = resource.MustParse("64M")
if limMem := config.GetSupportContainerLimit(v1.SideCar, k8sv1.ResourceMemory); limMem != nil {
resources.Limits[k8sv1.ResourceMemory] = *limMem
}
resources.Requests[k8sv1.ResourceCPU] = resources.Limits[k8sv1.ResourceCPU]
resources.Requests[k8sv1.ResourceMemory] = resources.Limits[k8sv1.ResourceMemory]
} else {
if limCpu := config.GetSupportContainerLimit(v1.SideCar, k8sv1.ResourceCPU); limCpu != nil {
resources.Limits[k8sv1.ResourceCPU] = *limCpu
}
if limMem := config.GetSupportContainerLimit(v1.SideCar, k8sv1.ResourceMemory); limMem != nil {
resources.Limits[k8sv1.ResourceMemory] = *limMem
}
}
return resources
}
func initContainerResourceRequirementsForVMI(vmi *v1.VirtualMachineInstance, containerType v1.SupportContainerType, config *virtconfig.ClusterConfig) k8sv1.ResourceRequirements {
if vmi.IsCPUDedicated() || vmi.WantsToHaveQOSGuaranteed() {
return k8sv1.ResourceRequirements{
Limits: initContainerDedicatedCPURequiredResources(containerType, config),
Requests: initContainerDedicatedCPURequiredResources(containerType, config),
}
} else {
return k8sv1.ResourceRequirements{
Limits: initContainerMinimalLimits(containerType, config),
Requests: initContainerMinimalRequests(containerType, config),
}
}
}
func initContainerDedicatedCPURequiredResources(containerType v1.SupportContainerType, config *virtconfig.ClusterConfig) k8sv1.ResourceList {
res := k8sv1.ResourceList{
k8sv1.ResourceCPU: resource.MustParse("10m"),
k8sv1.ResourceMemory: resource.MustParse("40M"),
}
if cpuLim := config.GetSupportContainerLimit(containerType, k8sv1.ResourceCPU); cpuLim != nil {
res[k8sv1.ResourceCPU] = *cpuLim
}
if memLim := config.GetSupportContainerLimit(containerType, k8sv1.ResourceMemory); memLim != nil {
res[k8sv1.ResourceMemory] = *memLim
}
return res
}
func initContainerMinimalLimits(containerType v1.SupportContainerType, config *virtconfig.ClusterConfig) k8sv1.ResourceList {
res := k8sv1.ResourceList{
k8sv1.ResourceCPU: resource.MustParse("100m"),
k8sv1.ResourceMemory: resource.MustParse("40M"),
}
if cpuLim := config.GetSupportContainerLimit(containerType, k8sv1.ResourceCPU); cpuLim != nil {
res[k8sv1.ResourceCPU] = *cpuLim
}
if memLim := config.GetSupportContainerLimit(containerType, k8sv1.ResourceMemory); memLim != nil {
res[k8sv1.ResourceMemory] = *memLim
}
return res
}
func initContainerMinimalRequests(containerType v1.SupportContainerType, config *virtconfig.ClusterConfig) k8sv1.ResourceList {
res := k8sv1.ResourceList{
k8sv1.ResourceCPU: resource.MustParse("10m"),
k8sv1.ResourceMemory: resource.MustParse("1M"),
}
if cpuReq := config.GetSupportContainerRequest(containerType, k8sv1.ResourceCPU); cpuReq != nil {
res[k8sv1.ResourceCPU] = *cpuReq
}
if memReq := config.GetSupportContainerRequest(containerType, k8sv1.ResourceMemory); memReq != nil {
res[k8sv1.ResourceMemory] = *memReq
}
return res
}
func hotplugContainerResourceRequirementsForVMI(config *virtconfig.ClusterConfig) k8sv1.ResourceRequirements {
return k8sv1.ResourceRequirements{
Limits: hotplugContainerLimits(config),
Requests: hotplugContainerRequests(config),
}
}
func hotplugContainerLimits(config *virtconfig.ClusterConfig) k8sv1.ResourceList {
cpuQuantity := resource.MustParse("100m")
if cpu := config.GetSupportContainerLimit(v1.HotplugAttachment, k8sv1.ResourceCPU); cpu != nil {
cpuQuantity = *cpu
}
memQuantity := resource.MustParse("80M")
if mem := config.GetSupportContainerLimit(v1.HotplugAttachment, k8sv1.ResourceMemory); mem != nil {
memQuantity = *mem
}
return k8sv1.ResourceList{
k8sv1.ResourceCPU: cpuQuantity,
k8sv1.ResourceMemory: memQuantity,
}
}
func hotplugContainerRequests(config *virtconfig.ClusterConfig) k8sv1.ResourceList {
cpuQuantity := resource.MustParse("10m")
if cpu := config.GetSupportContainerRequest(v1.HotplugAttachment, k8sv1.ResourceCPU); cpu != nil {
cpuQuantity = *cpu
}
memQuantity := resource.MustParse("2M")
if mem := config.GetSupportContainerRequest(v1.HotplugAttachment, k8sv1.ResourceMemory); mem != nil {
memQuantity = *mem
}
return k8sv1.ResourceList{
k8sv1.ResourceCPU: cpuQuantity,
k8sv1.ResourceMemory: memQuantity,
}
}
func vmExportContainerResourceRequirements(config *virtconfig.ClusterConfig) k8sv1.ResourceRequirements {
return k8sv1.ResourceRequirements{
Limits: vmExportContainerLimits(config),
Requests: vmExportContainerRequests(config),
}
}
func vmExportContainerLimits(config *virtconfig.ClusterConfig) k8sv1.ResourceList {
cpuQuantity := resource.MustParse("1")
if cpu := config.GetSupportContainerLimit(v1.VMExport, k8sv1.ResourceCPU); cpu != nil {
cpuQuantity = *cpu
}
memQuantity := resource.MustParse("1024Mi")
if mem := config.GetSupportContainerLimit(v1.VMExport, k8sv1.ResourceMemory); mem != nil {
memQuantity = *mem
}
return k8sv1.ResourceList{
k8sv1.ResourceCPU: cpuQuantity,
k8sv1.ResourceMemory: memQuantity,
}
}
func vmExportContainerRequests(config *virtconfig.ClusterConfig) k8sv1.ResourceList {
cpuQuantity := resource.MustParse("100m")
if cpu := config.GetSupportContainerRequest(v1.VMExport, k8sv1.ResourceCPU); cpu != nil {
cpuQuantity = *cpu
}
memQuantity := resource.MustParse("200Mi")
if mem := config.GetSupportContainerRequest(v1.VMExport, k8sv1.ResourceMemory); mem != nil {
memQuantity = *mem
}
return k8sv1.ResourceList{
k8sv1.ResourceCPU: cpuQuantity,
k8sv1.ResourceMemory: memQuantity,
}
}
func multiplyMemory(mem resource.Quantity, multiplication float64) resource.Quantity {
overheadAddition := float64(mem.ScaledValue(resource.Kilo)) * (multiplication - 1.0)
additionalOverhead := resource.NewScaledQuantity(int64(overheadAddition), resource.Kilo)
mem.Add(*additionalOverhead)
return mem
}
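// For example, multiplyMemory(mem, 1.5) with mem == 240Mi adds roughly
// 0.5 * 240Mi (computed on the Kilo-scaled value) and returns approximately
// 360Mi. Note the intermediate rounding: the ratio is applied to the
// Kilo-scaled value, so the result is close to, but not exactly, mem * ratio.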
func getMemoryLimitsRatio(namespace string, namespaceStore cache.Store) float64 {
if namespaceStore == nil {
return DefaultMemoryLimitOverheadRatio
}
obj, exists, err := namespaceStore.GetByKey(namespace)
if err != nil {
log.Log.Warningf("Error retrieving namespace from informer. Using the default memory limits ratio. %s", err.Error())
return DefaultMemoryLimitOverheadRatio
} else if !exists {
log.Log.Warningf("namespace %s does not exist. Using the default memory limits ratio.", namespace)
return DefaultMemoryLimitOverheadRatio
}
ns, ok := obj.(*k8sv1.Namespace)
if !ok {
log.Log.Errorf("couldn't cast object to Namespace: %+v", obj)
return DefaultMemoryLimitOverheadRatio
}
value, ok := ns.GetLabels()[v1.AutoMemoryLimitsRatioLabel]
if !ok {
return DefaultMemoryLimitOverheadRatio
}
limitRatioValue, err := strconv.ParseFloat(value, 64)
if err != nil || limitRatioValue < 1.0 {
log.Log.Warningf("%s is an invalid value for %s label in namespace %s. Using the default one: %f", value, v1.AutoMemoryLimitsRatioLabel, namespace, DefaultMemoryLimitOverheadRatio)
return DefaultMemoryLimitOverheadRatio
}
return limitRatioValue
}
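// As an illustration: labeling a namespace with v1.AutoMemoryLimitsRatioLabel
// set to "1.2" makes WithAutoMemoryLimits derive a memory limit of 1.2x the
// memory request for VMIs in that namespace. Values below 1.0, unparsable
// values, or a missing label all fall back to DefaultMemoryLimitOverheadRatio.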
package services
import (
"fmt"
"path/filepath"
"strings"
backendstorage "kubevirt.io/kubevirt/pkg/storage/backend-storage"
"kubevirt.io/kubevirt/pkg/tpm"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/client-go/tools/cache"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/config"
containerdisk "kubevirt.io/kubevirt/pkg/container-disk"
"kubevirt.io/kubevirt/pkg/hooks"
hostdisk "kubevirt.io/kubevirt/pkg/host-disk"
"kubevirt.io/kubevirt/pkg/network/downwardapi"
"kubevirt.io/kubevirt/pkg/storage/cbt"
"kubevirt.io/kubevirt/pkg/storage/types"
"kubevirt.io/kubevirt/pkg/util"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/pkg/virtiofs"
)
type VolumeRendererOption func(renderer *VolumeRenderer) error
type VolumeRenderer struct {
useImageVolumes bool
launcherImage string
imageIDs map[string]string
clusterConfig *virtconfig.ClusterConfig
containerDiskDir string
ephemeralDiskDir string
virtShareDir string
namespace string
vmiVolumes []v1.Volume
podVolumes []k8sv1.Volume
podVolumeMounts []k8sv1.VolumeMount
sharedFilesystemPaths []string
volumeDevices []k8sv1.VolumeDevice
}
func NewVolumeRenderer(clusterConfig *virtconfig.ClusterConfig, imageVolumeFeatureGateEnabled bool, launcherImage string, imageIDs map[string]string, namespace string, ephemeralDisk string, containerDiskDir string, virtShareDir string, volumeOptions ...VolumeRendererOption) (*VolumeRenderer, error) {
volumeRenderer := &VolumeRenderer{
useImageVolumes: imageVolumeFeatureGateEnabled,
launcherImage: launcherImage,
imageIDs: imageIDs,
clusterConfig: clusterConfig,
containerDiskDir: containerDiskDir,
ephemeralDiskDir: ephemeralDisk,
namespace: namespace,
virtShareDir: virtShareDir,
}
for _, volumeOption := range volumeOptions {
if err := volumeOption(volumeRenderer); err != nil {
return nil, err
}
}
return volumeRenderer, nil
}
func (vr *VolumeRenderer) Mounts() []k8sv1.VolumeMount {
volumeMounts := []k8sv1.VolumeMount{
mountPath("private", util.VirtPrivateDir),
mountPath("public", util.VirtShareDir),
mountPath("ephemeral-disks", vr.ephemeralDiskDir),
mountPath("libvirt-runtime", "/var/run/libvirt"),
mountPath("sockets", filepath.Join(vr.virtShareDir, "sockets")),
}
if !vr.useImageVolumes {
volumeMounts = append(volumeMounts, mountPathWithPropagation(containerDisks, vr.containerDiskDir, k8sv1.MountPropagationHostToContainer))
}
return append(volumeMounts, vr.podVolumeMounts...)
}
func (vr *VolumeRenderer) Volumes() []k8sv1.Volume {
volumes := []k8sv1.Volume{
emptyDirVolume("private"),
emptyDirVolume("public"),
emptyDirVolume("sockets"),
emptyDirVolume(virtBinDir),
emptyDirVolume("libvirt-runtime"),
emptyDirVolume("ephemeral-disks"),
}
if !vr.useImageVolumes {
volumes = append(volumes, emptyDirVolume(containerDisks))
}
return append(volumes, vr.podVolumes...)
}
func (vr *VolumeRenderer) VolumeDevices() []k8sv1.VolumeDevice {
return vr.volumeDevices
}
func (vr *VolumeRenderer) SharedFilesystemPaths() []string {
return vr.sharedFilesystemPaths
}
func mountPath(name string, path string) k8sv1.VolumeMount {
return k8sv1.VolumeMount{
Name: name,
MountPath: path,
}
}
func mountPathWithPropagation(name string, path string, propagation k8sv1.MountPropagationMode) k8sv1.VolumeMount {
return k8sv1.VolumeMount{
Name: name,
MountPath: path,
MountPropagation: &propagation,
}
}
func emptyDirVolume(name string) k8sv1.Volume {
return k8sv1.Volume{
Name: name,
VolumeSource: k8sv1.VolumeSource{
EmptyDir: &k8sv1.EmptyDirVolumeSource{}},
}
}
func downwardAPIDirVolume(name, path, fieldPath string) k8sv1.Volume {
return k8sv1.Volume{
Name: name,
VolumeSource: k8sv1.VolumeSource{
DownwardAPI: &k8sv1.DownwardAPIVolumeSource{
Items: []k8sv1.DownwardAPIVolumeFile{
{
Path: path,
FieldRef: &k8sv1.ObjectFieldSelector{
FieldPath: fieldPath,
},
},
},
},
},
}
}
func withVMIVolumes(pvcStore cache.Store, vmiSpecVolumes []v1.Volume, vmiVolumeStatus []v1.VolumeStatus) VolumeRendererOption {
return func(renderer *VolumeRenderer) error {
hotplugVolumesByName := hotplugVolumes(vmiVolumeStatus, vmiSpecVolumes)
for _, volume := range vmiSpecVolumes {
if _, isHotplugVolume := hotplugVolumesByName[volume.Name]; isHotplugVolume {
continue
}
if volume.PersistentVolumeClaim != nil {
if err := renderer.handlePVCVolume(volume, pvcStore); err != nil {
return err
}
}
if volume.Ephemeral != nil {
if err := renderer.handleEphemeralVolume(volume, pvcStore); err != nil {
return err
}
}
if volume.HostDisk != nil {
renderer.handleHostDisk(volume)
}
if volume.DataVolume != nil {
if err := renderer.handleDataVolume(volume, pvcStore); err != nil {
return err
}
}
if volume.DownwardMetrics != nil {
renderer.handleDownwardMetrics(volume)
}
if volume.CloudInitNoCloud != nil {
renderer.handleCloudInitNoCloud(volume)
}
if volume.Sysprep != nil {
if err := renderer.handleSysprep(volume); err != nil {
return err
}
}
if volume.CloudInitConfigDrive != nil {
renderer.handleCloudInitConfigDrive(volume)
}
}
return nil
}
}
func withVMIConfigVolumes(vmiDisks []v1.Disk, vmiVolumes []v1.Volume) VolumeRendererOption {
return func(renderer *VolumeRenderer) error {
volumes := make(map[string]v1.Volume)
for _, volume := range vmiVolumes {
volumes[volume.Name] = volume
if volume.Secret != nil {
renderer.addSecretVolume(volume)
}
if volume.ConfigMap != nil {
renderer.addConfigMapVolume(volume)
}
if volume.DownwardAPI != nil {
renderer.addDownwardAPIVolume(volume)
}
}
for _, disk := range vmiDisks {
volume, ok := volumes[disk.Name]
if !ok {
continue
}
if volume.Secret != nil {
renderer.addSecretVolumeMount(volume)
}
if volume.ConfigMap != nil {
renderer.addConfigMapVolumeMount(volume)
}
if volume.DownwardAPI != nil {
renderer.addDownwardAPIVolumeMount(volume)
}
}
return nil
}
}
func withImageVolumes(vmi *v1.VirtualMachineInstance) VolumeRendererOption {
return func(renderer *VolumeRenderer) error {
for i, volume := range vmi.Spec.Volumes {
if volume.ContainerDisk != nil {
renderer.addContainerDiskVolume(volume)
renderer.addContainerDiskVolumeMount(volume, i)
}
}
if util.HasKernelBootContainerImage(vmi) {
kbc := vmi.Spec.Domain.Firmware.KernelBoot.Container
renderer.addKernelBootVolume(kbc)
renderer.addKernelBootVolumeMount()
}
if shouldAddLauncherBinaryVolume(vmi, renderer.imageIDs) {
renderer.addLauncherBinaryVolume()
}
return nil
}
}
func (vr *VolumeRenderer) handleCloudInitConfigDrive(volume v1.Volume) {
if volume.CloudInitConfigDrive != nil {
if volume.CloudInitConfigDrive.UserDataSecretRef != nil {
// attach a secret referenced by the user
volumeName := volume.Name + "-udata"
vr.podVolumes = append(vr.podVolumes, k8sv1.Volume{
Name: volumeName,
VolumeSource: k8sv1.VolumeSource{
Secret: &k8sv1.SecretVolumeSource{
SecretName: volume.CloudInitConfigDrive.UserDataSecretRef.Name,
},
},
})
vr.podVolumeMounts = append(vr.podVolumeMounts, k8sv1.VolumeMount{
Name: volumeName,
MountPath: filepath.Join(config.SecretSourceDir, volume.Name, "userdata"),
SubPath: "userdata",
ReadOnly: true,
})
vr.podVolumeMounts = append(vr.podVolumeMounts, k8sv1.VolumeMount{
Name: volumeName,
MountPath: filepath.Join(config.SecretSourceDir, volume.Name, "userData"),
SubPath: "userData",
ReadOnly: true,
})
}
if volume.CloudInitConfigDrive.NetworkDataSecretRef != nil {
// attach the secret referenced by NetworkDataSecretRef
volumeName := volume.Name + "-ndata"
vr.podVolumes = append(vr.podVolumes, k8sv1.Volume{
Name: volumeName,
VolumeSource: k8sv1.VolumeSource{
Secret: &k8sv1.SecretVolumeSource{
SecretName: volume.CloudInitConfigDrive.NetworkDataSecretRef.Name,
},
},
})
vr.podVolumeMounts = append(vr.podVolumeMounts, k8sv1.VolumeMount{
Name: volumeName,
MountPath: filepath.Join(config.SecretSourceDir, volume.Name, "networkdata"),
SubPath: "networkdata",
ReadOnly: true,
})
vr.podVolumeMounts = append(vr.podVolumeMounts, k8sv1.VolumeMount{
Name: volumeName,
MountPath: filepath.Join(config.SecretSourceDir, volume.Name, "networkData"),
SubPath: "networkData",
ReadOnly: true,
})
}
}
}
func (vr *VolumeRenderer) handleSysprep(volume v1.Volume) error {
if volume.Sysprep != nil {
var volumeSource k8sv1.VolumeSource
// attach a Secret or ConfigMap referenced by the user
volumeSource, err := sysprepVolumeSource(*volume.Sysprep)
if err != nil {
return err
}
vr.podVolumes = append(vr.podVolumes, k8sv1.Volume{
Name: volume.Name,
VolumeSource: volumeSource,
})
vr.podVolumeMounts = append(vr.podVolumeMounts, k8sv1.VolumeMount{
Name: volume.Name,
MountPath: filepath.Join(config.SysprepSourceDir, volume.Name),
ReadOnly: true,
})
}
return nil
}
func hotplugVolumes(vmiVolumeStatus []v1.VolumeStatus, vmiSpecVolumes []v1.Volume) map[string]struct{} {
hotplugVolumeSet := map[string]struct{}{}
for _, volumeStatus := range vmiVolumeStatus {
if volumeStatus.HotplugVolume != nil {
hotplugVolumeSet[volumeStatus.Name] = struct{}{}
}
}
// This detects hotplug volumes for a started but not ready VMI
for _, volume := range vmiSpecVolumes {
if (volume.DataVolume != nil && volume.DataVolume.Hotpluggable) || (volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.Hotpluggable) {
hotplugVolumeSet[volume.Name] = struct{}{}
}
}
return hotplugVolumeSet
}
func withAccessCredentials(accessCredentials []v1.AccessCredential) VolumeRendererOption {
return func(renderer *VolumeRenderer) error {
for _, accessCred := range accessCredentials {
secretName := ""
if accessCred.SSHPublicKey != nil && accessCred.SSHPublicKey.Source.Secret != nil {
secretName = accessCred.SSHPublicKey.Source.Secret.SecretName
} else if accessCred.UserPassword != nil && accessCred.UserPassword.Source.Secret != nil {
secretName = accessCred.UserPassword.Source.Secret.SecretName
}
if secretName == "" {
continue
}
volumeName := secretName + "-access-cred"
renderer.podVolumes = append(renderer.podVolumes, k8sv1.Volume{
Name: volumeName,
VolumeSource: k8sv1.VolumeSource{
Secret: &k8sv1.SecretVolumeSource{
SecretName: secretName,
},
},
})
renderer.podVolumeMounts = append(renderer.podVolumeMounts, k8sv1.VolumeMount{
Name: volumeName,
MountPath: filepath.Join(config.SecretSourceDir, volumeName),
ReadOnly: true,
})
}
return nil
}
}
func PathForSwtpm(vmi *v1.VirtualMachineInstance) string {
swtpmPath := "/var/lib/libvirt/swtpm"
if util.IsNonRootVMI(vmi) {
swtpmPath = filepath.Join(util.VirtPrivateDir, "libvirt", "qemu", "swtpm")
}
return swtpmPath
}
func PathForSwtpmLocalca(vmi *v1.VirtualMachineInstance) string {
localCaPath := "/var/lib/swtpm-localca"
if util.IsNonRootVMI(vmi) {
localCaPath = filepath.Join(util.VirtPrivateDir, "var", "lib", "swtpm-localca")
}
return localCaPath
}
func PathForNVram(vmi *v1.VirtualMachineInstance) string {
nvramPath := "/var/lib/libvirt/qemu/nvram"
if util.IsNonRootVMI(vmi) {
nvramPath = filepath.Join(util.VirtPrivateDir, "libvirt", "qemu", "nvram")
}
return nvramPath
}
func withBackendStorage(vmi *v1.VirtualMachineInstance, backendStoragePVCName string) VolumeRendererOption {
return func(renderer *VolumeRenderer) error {
if !backendstorage.IsBackendStorageNeeded(vmi) {
return nil
}
volumeName := "vm-state"
renderer.podVolumes = append(renderer.podVolumes, k8sv1.Volume{
Name: volumeName,
VolumeSource: k8sv1.VolumeSource{
PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{
ClaimName: backendStoragePVCName,
ReadOnly: false,
},
},
})
renderer.podVolumeMounts = append(renderer.podVolumeMounts, k8sv1.VolumeMount{
Name: volumeName,
ReadOnly: false,
MountPath: "/run/kubevirt-private/backend-storage-meta",
SubPath: "meta",
})
if util.IsNonRootVMI(vmi) {
// For non-root VMIs, the TPM state lives under /var/run/kubevirt-private/libvirt/qemu/swtpm
// To persist it, we need the persistent PVC to be mounted under that location.
// /var/run/kubevirt-private is an emptyDir, and k8s would automatically create the right sub-directories under it.
// However, the sub-directories would get created as root:<fsGroup>, with a mode like 0755 (drwxr-xr-x), preventing write access to them.
// Depending on the storage class used, the SELinux label of the sub-directories can also be problematic (like nfs_t for nfs-csi).
// Creating emptyDirs for each intermediate directory (+ setting fsGroup to 107) solves both issues.
// The only viable alternative would be to use an init container to `mkdir -p /var/run/kubevirt-private/libvirt/qemu/swtpm`,
// but init containers are expensive, and emptyDirs were deemed to be the least undesirable approach.
renderer.podVolumes = append(renderer.podVolumes,
emptyDirVolume("private-libvirt"),
emptyDirVolume("private-libvirt-qemu"))
renderer.podVolumeMounts = append(renderer.podVolumeMounts, k8sv1.VolumeMount{
Name: "private-libvirt",
MountPath: filepath.Join(util.VirtPrivateDir, "libvirt"),
}, k8sv1.VolumeMount{
Name: "private-libvirt-qemu",
MountPath: filepath.Join(util.VirtPrivateDir, "libvirt", "qemu"),
})
}
if tpm.HasPersistentDevice(&vmi.Spec) {
renderer.podVolumeMounts = append(renderer.podVolumeMounts, k8sv1.VolumeMount{
Name: volumeName,
ReadOnly: false,
MountPath: PathForSwtpm(vmi),
SubPath: "swtpm",
}, k8sv1.VolumeMount{
Name: volumeName,
ReadOnly: false,
MountPath: PathForSwtpmLocalca(vmi),
SubPath: "swtpm-localca",
})
}
if backendstorage.HasPersistentEFI(&vmi.Spec) {
renderer.podVolumeMounts = append(renderer.podVolumeMounts, k8sv1.VolumeMount{
Name: volumeName,
ReadOnly: false,
MountPath: PathForNVram(vmi),
SubPath: "nvram",
})
}
if cbt.HasCBTStateEnabled(vmi.Status.ChangedBlockTracking) {
renderer.podVolumeMounts = append(renderer.podVolumeMounts, k8sv1.VolumeMount{
Name: volumeName,
ReadOnly: false,
MountPath: cbt.PathForCBT(vmi),
SubPath: "cbt",
})
}
return nil
}
}
func withSidecarVolumes(hookSidecars hooks.HookSidecarList) VolumeRendererOption {
return func(renderer *VolumeRenderer) error {
if len(hookSidecars) != 0 {
renderer.podVolumes = append(renderer.podVolumes, emptyDirVolume(hookSidecarSocks))
renderer.podVolumeMounts = append(renderer.podVolumeMounts, k8sv1.VolumeMount{
Name: hookSidecarSocks,
MountPath: hooks.HookSocketsSharedDirectory,
})
}
return nil
}
}
func withVirioFS() VolumeRendererOption {
return func(renderer *VolumeRenderer) error {
renderer.podVolumeMounts = append(renderer.podVolumeMounts, mountPath(virtiofs.VirtioFSContainers, virtiofs.VirtioFSContainersMountBaseDir))
renderer.podVolumes = append(renderer.podVolumes, emptyDirVolume(virtiofs.VirtioFSContainers))
return nil
}
}
func withHugepages() VolumeRendererOption {
return func(renderer *VolumeRenderer) error {
hugepagesBasePath := "/dev/hugepages"
renderer.podVolumes = append(renderer.podVolumes, k8sv1.Volume{
Name: "hugepages",
VolumeSource: k8sv1.VolumeSource{
EmptyDir: &k8sv1.EmptyDirVolumeSource{
Medium: k8sv1.StorageMediumHugePages,
},
},
})
renderer.podVolumeMounts = append(renderer.podVolumeMounts, k8sv1.VolumeMount{
Name: "hugepages",
MountPath: hugepagesBasePath,
})
renderer.podVolumes = append(renderer.podVolumes, k8sv1.Volume{
Name: "hugetblfs-dir",
VolumeSource: k8sv1.VolumeSource{
EmptyDir: &k8sv1.EmptyDirVolumeSource{},
},
})
renderer.podVolumeMounts = append(renderer.podVolumeMounts, k8sv1.VolumeMount{
Name: "hugetblfs-dir",
MountPath: filepath.Join(hugepagesBasePath, "libvirt/qemu"),
})
return nil
}
}
func withHotplugSupport(hotplugDiskDir string) VolumeRendererOption {
return func(renderer *VolumeRenderer) error {
prop := k8sv1.MountPropagationHostToContainer
renderer.podVolumeMounts = append(renderer.podVolumeMounts, k8sv1.VolumeMount{
Name: hotplugDisks,
MountPath: hotplugDiskDir,
MountPropagation: &prop,
})
renderer.podVolumes = append(renderer.podVolumes, emptyDirVolume(hotplugDisks))
return nil
}
}
func withNetworkDeviceInfoMapAnnotation() VolumeRendererOption {
return func(renderer *VolumeRenderer) error {
renderer.podVolumes = append(renderer.podVolumes,
downwardAPIDirVolume(
downwardapi.NetworkInfoVolumeName, downwardapi.NetworkInfoVolumePath, fmt.Sprintf("metadata.annotations['%s']", downwardapi.NetworkInfoAnnot)),
)
return nil
}
}
func imgPullSecrets(volumes ...v1.Volume) []k8sv1.LocalObjectReference {
var imagePullSecrets []k8sv1.LocalObjectReference
for _, volume := range volumes {
if volume.ContainerDisk != nil && volume.ContainerDisk.ImagePullSecret != "" {
imagePullSecrets = appendUniqueImagePullSecret(imagePullSecrets, k8sv1.LocalObjectReference{
Name: volume.ContainerDisk.ImagePullSecret,
})
}
}
return imagePullSecrets
}
func serviceAccount(volumes ...v1.Volume) string {
for _, volume := range volumes {
if volume.ServiceAccount != nil {
return volume.ServiceAccount.ServiceAccountName
}
}
return ""
}
func (vr *VolumeRenderer) addPVCToLaunchManifest(pvcStore cache.Store, volume v1.Volume, claimName string) error {
logger := log.DefaultLogger()
pvc, exists, isBlock, err := types.IsPVCBlockFromStore(pvcStore, vr.namespace, claimName)
if err != nil {
logger.Errorf("error getting PVC: %v", claimName)
return err
} else if !exists {
logger.Errorf("didn't find PVC %v", claimName)
return types.PvcNotFoundError{Reason: fmt.Sprintf("didn't find PVC %v", claimName)}
} else if isBlock {
devicePath := filepath.Join(string(filepath.Separator), "dev", volume.Name)
device := k8sv1.VolumeDevice{
Name: volume.Name,
DevicePath: devicePath,
}
vr.volumeDevices = append(vr.volumeDevices, device)
} else {
path := hostdisk.GetMountedHostDiskDir(volume.Name)
volumeMount := k8sv1.VolumeMount{
Name: volume.Name,
MountPath: path,
}
vr.podVolumeMounts = append(vr.podVolumeMounts, volumeMount)
if types.HasSharedAccessMode(pvc.Spec.AccessModes) {
vr.sharedFilesystemPaths = append(vr.sharedFilesystemPaths, path)
}
}
return nil
}
func (vr *VolumeRenderer) handlePVCVolume(volume v1.Volume, pvcStore cache.Store) error {
claimName := volume.PersistentVolumeClaim.ClaimName
if err := vr.addPVCToLaunchManifest(pvcStore, volume, claimName); err != nil {
return err
}
vr.podVolumes = append(vr.podVolumes, k8sv1.Volume{
Name: volume.Name,
VolumeSource: k8sv1.VolumeSource{
PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{
ClaimName: volume.PersistentVolumeClaim.ClaimName,
ReadOnly: volume.PersistentVolumeClaim.ReadOnly,
},
},
})
return nil
}
func (vr *VolumeRenderer) handleEphemeralVolume(volume v1.Volume, pvcStore cache.Store) error {
claimName := volume.Ephemeral.PersistentVolumeClaim.ClaimName
if err := vr.addPVCToLaunchManifest(pvcStore, volume, claimName); err != nil {
return err
}
vr.podVolumes = append(vr.podVolumes, k8sv1.Volume{
Name: volume.Name,
VolumeSource: k8sv1.VolumeSource{
PersistentVolumeClaim: volume.Ephemeral.PersistentVolumeClaim,
},
})
return nil
}
func (vr *VolumeRenderer) handleDataVolume(volume v1.Volume, pvcStore cache.Store) error {
claimName := volume.DataVolume.Name
if err := vr.addPVCToLaunchManifest(pvcStore, volume, claimName); err != nil {
return err
}
vr.podVolumes = append(vr.podVolumes, k8sv1.Volume{
Name: volume.Name,
VolumeSource: k8sv1.VolumeSource{
PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{
ClaimName: claimName,
},
},
})
return nil
}
func (vr *VolumeRenderer) handleHostDisk(volume v1.Volume) {
var hostPathType k8sv1.HostPathType
switch hostType := volume.HostDisk.Type; hostType {
case v1.HostDiskExists:
hostPathType = k8sv1.HostPathDirectory
case v1.HostDiskExistsOrCreate:
hostPathType = k8sv1.HostPathDirectoryOrCreate
}
vr.podVolumeMounts = append(vr.podVolumeMounts, k8sv1.VolumeMount{
Name: volume.Name,
MountPath: hostdisk.GetMountedHostDiskDir(volume.Name),
})
vr.podVolumes = append(vr.podVolumes, k8sv1.Volume{
Name: volume.Name,
VolumeSource: k8sv1.VolumeSource{
HostPath: &k8sv1.HostPathVolumeSource{
Path: filepath.Dir(volume.HostDisk.Path),
Type: &hostPathType,
},
},
})
}
func (vr *VolumeRenderer) addSecretVolume(volume v1.Volume) {
vr.podVolumes = append(vr.podVolumes, k8sv1.Volume{
Name: volume.Name,
VolumeSource: k8sv1.VolumeSource{
Secret: &k8sv1.SecretVolumeSource{
SecretName: volume.Secret.SecretName,
Optional: volume.Secret.Optional,
},
},
})
}
func (vr *VolumeRenderer) addSecretVolumeMount(volume v1.Volume) {
vr.podVolumeMounts = append(vr.podVolumeMounts, k8sv1.VolumeMount{
Name: volume.Name,
MountPath: config.GetSecretSourcePath(volume.Name),
ReadOnly: true,
})
}
func (vr *VolumeRenderer) addConfigMapVolume(volume v1.Volume) {
vr.podVolumes = append(vr.podVolumes, k8sv1.Volume{
Name: volume.Name,
VolumeSource: k8sv1.VolumeSource{
ConfigMap: &k8sv1.ConfigMapVolumeSource{
LocalObjectReference: volume.ConfigMap.LocalObjectReference,
Optional: volume.ConfigMap.Optional,
},
},
})
}
func (vr *VolumeRenderer) addConfigMapVolumeMount(volume v1.Volume) {
vr.podVolumeMounts = append(vr.podVolumeMounts, k8sv1.VolumeMount{
Name: volume.Name,
MountPath: config.GetConfigMapSourcePath(volume.Name),
ReadOnly: true,
})
}
func (vr *VolumeRenderer) addDownwardAPIVolume(volume v1.Volume) {
vr.podVolumes = append(vr.podVolumes, k8sv1.Volume{
Name: volume.Name,
VolumeSource: k8sv1.VolumeSource{
DownwardAPI: &k8sv1.DownwardAPIVolumeSource{
Items: volume.DownwardAPI.Fields,
},
},
})
}
func (vr *VolumeRenderer) addDownwardAPIVolumeMount(volume v1.Volume) {
vr.podVolumeMounts = append(vr.podVolumeMounts, k8sv1.VolumeMount{
Name: volume.Name,
MountPath: config.GetDownwardAPISourcePath(volume.Name),
ReadOnly: true,
})
}
func (vr *VolumeRenderer) addContainerDiskVolume(volume v1.Volume) {
diskContainerImage := volume.ContainerDisk.Image
if img, exists := vr.imageIDs[volume.Name]; exists {
diskContainerImage = img
}
vr.podVolumes = append(vr.podVolumes, k8sv1.Volume{
Name: volume.Name,
VolumeSource: k8sv1.VolumeSource{
Image: &k8sv1.ImageVolumeSource{
Reference: diskContainerImage,
PullPolicy: volume.ContainerDisk.ImagePullPolicy,
},
},
})
}
func (vr *VolumeRenderer) addContainerDiskVolumeMount(volume v1.Volume, volumeIndex int) {
vr.podVolumeMounts = append(vr.podVolumeMounts, k8sv1.VolumeMount{
Name: volume.Name,
MountPath: filepath.Join(util.VirtImageVolumeDir, fmt.Sprintf("disk_%d", volumeIndex)),
ReadOnly: true,
})
}
func (vr *VolumeRenderer) addKernelBootVolume(kbc *v1.KernelBootContainer) {
kernelBootContainerImage := kbc.Image
if img, exists := vr.imageIDs[containerdisk.KernelBootVolumeName]; exists {
kernelBootContainerImage = img
}
vr.podVolumes = append(vr.podVolumes, k8sv1.Volume{
Name: containerdisk.KernelBootVolumeName,
VolumeSource: k8sv1.VolumeSource{
Image: &k8sv1.ImageVolumeSource{
Reference: kernelBootContainerImage,
PullPolicy: kbc.ImagePullPolicy,
},
},
})
}
func (vr *VolumeRenderer) addKernelBootVolumeMount() {
vr.podVolumeMounts = append(vr.podVolumeMounts, k8sv1.VolumeMount{
Name: containerdisk.KernelBootVolumeName,
MountPath: util.VirtKernelBootVolumeDir,
ReadOnly: true,
})
}
func (vr *VolumeRenderer) addLauncherBinaryVolume() {
vr.podVolumes = append(vr.podVolumes, k8sv1.Volume{
Name: containerdisk.LauncherVolume,
VolumeSource: k8sv1.VolumeSource{
Image: &k8sv1.ImageVolumeSource{
Reference: vr.launcherImage,
PullPolicy: vr.clusterConfig.GetImagePullPolicy(),
},
},
})
}
func (vr *VolumeRenderer) handleCloudInitNoCloud(volume v1.Volume) {
if volume.CloudInitNoCloud.UserDataSecretRef != nil {
// attach a secret referenced by the user
volumeName := volume.Name + "-udata"
vr.podVolumes = append(vr.podVolumes, k8sv1.Volume{
Name: volumeName,
VolumeSource: k8sv1.VolumeSource{
Secret: &k8sv1.SecretVolumeSource{
SecretName: volume.CloudInitNoCloud.UserDataSecretRef.Name,
},
},
})
vr.podVolumeMounts = append(vr.podVolumeMounts, k8sv1.VolumeMount{
Name: volumeName,
MountPath: filepath.Join(config.SecretSourceDir, volume.Name, "userdata"),
SubPath: "userdata",
ReadOnly: true,
})
vr.podVolumeMounts = append(vr.podVolumeMounts, k8sv1.VolumeMount{
Name: volumeName,
MountPath: filepath.Join(config.SecretSourceDir, volume.Name, "userData"),
SubPath: "userData",
ReadOnly: true,
})
}
if volume.CloudInitNoCloud.NetworkDataSecretRef != nil {
// attach the secret referenced by NetworkDataSecretRef
volumeName := volume.Name + "-ndata"
vr.podVolumes = append(vr.podVolumes, k8sv1.Volume{
Name: volumeName,
VolumeSource: k8sv1.VolumeSource{
Secret: &k8sv1.SecretVolumeSource{
SecretName: volume.CloudInitNoCloud.NetworkDataSecretRef.Name,
},
},
})
vr.podVolumeMounts = append(vr.podVolumeMounts, k8sv1.VolumeMount{
Name: volumeName,
MountPath: filepath.Join(config.SecretSourceDir, volume.Name, "networkdata"),
SubPath: "networkdata",
ReadOnly: true,
})
vr.podVolumeMounts = append(vr.podVolumeMounts, k8sv1.VolumeMount{
Name: volumeName,
MountPath: filepath.Join(config.SecretSourceDir, volume.Name, "networkData"),
SubPath: "networkData",
ReadOnly: true,
})
}
}
func (vr *VolumeRenderer) handleDownwardMetrics(volume v1.Volume) {
sizeLimit := resource.MustParse("1Mi")
vr.podVolumes = append(vr.podVolumes, k8sv1.Volume{
Name: volume.Name,
VolumeSource: k8sv1.VolumeSource{
EmptyDir: &k8sv1.EmptyDirVolumeSource{
Medium: "Memory",
SizeLimit: &sizeLimit,
},
},
})
vr.podVolumeMounts = append(vr.podVolumeMounts, k8sv1.VolumeMount{
Name: volume.Name,
MountPath: config.DownwardMetricDisksDir,
})
}
// shouldAddLauncherBinaryVolume decides if we need to add the launcher image volume.
// Even if only one init container is required for digest extraction, we must add the
// volume that contains the binary used by that init container. Without this volume,
// the init container cannot run.
func shouldAddLauncherBinaryVolume(vmi *v1.VirtualMachineInstance, imageIDs map[string]string) bool {
for _, volume := range vmi.Spec.Volumes {
containerDiskImageIDAlreadyExists := strings.Contains(imageIDs[volume.Name], "@sha256:")
if volume.ContainerDisk == nil || containerDiskImageIDAlreadyExists {
continue
}
return true
}
kernelBootImageIDAlreadyExists := strings.Contains(imageIDs[containerdisk.KernelBootVolumeName], "@sha256:")
return util.HasKernelBootContainerImage(vmi) && !kernelBootImageIDAlreadyExists
}
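// For illustration: with a single ContainerDisk volume "disk0" and
// imageIDs == map[string]string{"disk0": "registry/disk@sha256:<digest>"},
// every digest is already known and the function returns false. With an empty
// imageIDs map it returns true, so the launcher binary volume is added for the
// digest-extraction init container to use.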
package services
import (
"fmt"
"kubevirt.io/kubevirt/pkg/pointer"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/util"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
func generateSerialConsoleLogContainer(vmi *v1.VirtualMachineInstance, image string, config *virtconfig.ClusterConfig, virtLauncherLogVerbosity uint) *k8sv1.Container {
const serialPort = 0
if isSerialConsoleLogEnabled(vmi, config) {
logFile := fmt.Sprintf("%s/%s/virt-serial%d-log", util.VirtPrivateDir, vmi.ObjectMeta.UID, serialPort)
resources := resourcesForSerialConsoleLogContainer(vmi.IsCPUDedicated(), vmi.WantsToHaveQOSGuaranteed(), config)
guestConsoleLog := &k8sv1.Container{
Name: "guest-console-log",
Image: image,
ImagePullPolicy: config.GetImagePullPolicy(),
Command: []string{"/usr/bin/virt-tail"},
Args: []string{"--logfile", logFile},
VolumeMounts: []k8sv1.VolumeMount{
{
Name: "private",
MountPath: util.VirtPrivateDir,
ReadOnly: true,
},
},
Resources: resources,
SecurityContext: &k8sv1.SecurityContext{
RunAsUser: pointer.P(int64(util.NonRootUID)),
RunAsNonRoot: pointer.P(true),
AllowPrivilegeEscalation: pointer.P(false),
Capabilities: &k8sv1.Capabilities{
Drop: []k8sv1.Capability{"ALL"},
},
},
RestartPolicy: pointer.P(k8sv1.ContainerRestartPolicyAlways),
}
guestConsoleLog.Env = append(guestConsoleLog.Env, k8sv1.EnvVar{Name: ENV_VAR_VIRT_LAUNCHER_LOG_VERBOSITY, Value: fmt.Sprint(virtLauncherLogVerbosity)})
return guestConsoleLog
}
return nil
}
func isSerialConsoleLogEnabled(vmi *v1.VirtualMachineInstance, config *virtconfig.ClusterConfig) bool {
if vmi.Spec.Domain.Devices.AutoattachSerialConsole != nil && *vmi.Spec.Domain.Devices.AutoattachSerialConsole == false {
return false
}
if vmi.Spec.Domain.Devices.LogSerialConsole != nil {
return *vmi.Spec.Domain.Devices.LogSerialConsole
}
return !config.IsSerialConsoleLogDisabled()
}
func resourcesForSerialConsoleLogContainer(dedicatedCPUs bool, guaranteedQOS bool, config *virtconfig.ClusterConfig) k8sv1.ResourceRequirements {
resources := k8sv1.ResourceRequirements{Requests: k8sv1.ResourceList{}, Limits: k8sv1.ResourceList{}}
resources.Requests[k8sv1.ResourceMemory] = resource.MustParse("35M")
if reqMem := config.GetSupportContainerRequest(v1.GuestConsoleLog, k8sv1.ResourceMemory); reqMem != nil {
resources.Requests[k8sv1.ResourceMemory] = *reqMem
}
resources.Requests[k8sv1.ResourceCPU] = resource.MustParse("5m")
if reqCpu := config.GetSupportContainerRequest(v1.GuestConsoleLog, k8sv1.ResourceCPU); reqCpu != nil {
resources.Requests[k8sv1.ResourceCPU] = *reqCpu
}
resources.Limits[k8sv1.ResourceMemory] = resource.MustParse("60M")
if limMem := config.GetSupportContainerLimit(v1.GuestConsoleLog, k8sv1.ResourceMemory); limMem != nil {
resources.Limits[k8sv1.ResourceMemory] = *limMem
}
resources.Limits[k8sv1.ResourceCPU] = resource.MustParse("15m")
if limCpu := config.GetSupportContainerLimit(v1.GuestConsoleLog, k8sv1.ResourceCPU); limCpu != nil {
resources.Limits[k8sv1.ResourceCPU] = *limCpu
}
if dedicatedCPUs || guaranteedQOS {
resources.Requests[k8sv1.ResourceCPU] = resources.Limits[k8sv1.ResourceCPU]
resources.Requests[k8sv1.ResourceMemory] = resources.Limits[k8sv1.ResourceMemory]
}
return resources
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package services
import (
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/hooks"
)
type SidecarCreatorFunc func(*v1.VirtualMachineInstance, *v1.KubeVirtConfiguration) (hooks.HookSidecarList, error)
func WithSidecarCreator(sidecarCreator SidecarCreatorFunc) templateServiceOption {
return func(t *TemplateService) {
t.sidecarCreators = append(t.sidecarCreators, sidecarCreator)
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package services
import (
"context"
"fmt"
"maps"
"math/rand"
"os"
"strconv"
"strings"
"github.com/openshift/library-go/pkg/build/naming"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/client-go/tools/cache"
"k8s.io/kubectl/pkg/cmd/util/podcmd"
v1 "kubevirt.io/api/core/v1"
exportv1 "kubevirt.io/api/export/v1beta1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/client-go/precond"
drautil "kubevirt.io/kubevirt/pkg/dra"
"kubevirt.io/kubevirt/pkg/pointer"
"kubevirt.io/kubevirt/pkg/apimachinery"
containerdisk "kubevirt.io/kubevirt/pkg/container-disk"
"kubevirt.io/kubevirt/pkg/hooks"
"kubevirt.io/kubevirt/pkg/network/downwardapi"
"kubevirt.io/kubevirt/pkg/network/istio"
"kubevirt.io/kubevirt/pkg/network/multus"
"kubevirt.io/kubevirt/pkg/network/vmispec"
backendstorage "kubevirt.io/kubevirt/pkg/storage/backend-storage"
"kubevirt.io/kubevirt/pkg/storage/reservation"
"kubevirt.io/kubevirt/pkg/storage/types"
"kubevirt.io/kubevirt/pkg/util"
"kubevirt.io/kubevirt/pkg/util/net/dns"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/pkg/virt-controller/watch/descheduler"
"kubevirt.io/kubevirt/pkg/virt-controller/watch/topology"
"kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
operatorutil "kubevirt.io/kubevirt/pkg/virt-operator/util"
)
const (
containerDisks = "container-disks"
hotplugDisks = "hotplug-disks"
hookSidecarSocks = "hook-sidecar-sockets"
varRun = "/var/run"
virtBinDir = "virt-bin-share-dir"
hotplugDisk = "hotplug-disk"
virtExporter = "virt-exporter"
)
const KvmDevice = "devices.kubevirt.io/kvm"
const TunDevice = "devices.kubevirt.io/tun"
const VhostNetDevice = "devices.kubevirt.io/vhost-net"
const SevDevice = "devices.kubevirt.io/sev"
const VhostVsockDevice = "devices.kubevirt.io/vhost-vsock"
const PrDevice = "devices.kubevirt.io/pr-helper"
const debugLogs = "debugLogs"
const logVerbosity = "logVerbosity"
const virtiofsDebugLogs = "virtiofsdDebugLogs"
const qemuTimeoutJitterRange = 120
const (
CAP_NET_BIND_SERVICE = "NET_BIND_SERVICE"
CAP_SYS_NICE = "SYS_NICE"
)
// LibvirtStartupDelay is added to custom liveness and readiness probes' initial delay values.
// Libvirt needs roughly 10 seconds to start.
const LibvirtStartupDelay = 10
const IntelVendorName = "Intel"
const ENV_VAR_LIBVIRT_DEBUG_LOGS = "LIBVIRT_DEBUG_LOGS"
const ENV_VAR_VIRTIOFSD_DEBUG_LOGS = "VIRTIOFSD_DEBUG_LOGS"
const ENV_VAR_VIRT_LAUNCHER_LOG_VERBOSITY = "VIRT_LAUNCHER_LOG_VERBOSITY"
const ENV_VAR_SHARED_FILESYSTEM_PATHS = "SHARED_FILESYSTEM_PATHS"
const ENV_VAR_POD_NAME = "POD_NAME"
// Log verbosity threshold above which libvirt debug logs are enabled.
const EXT_LOG_VERBOSITY_THRESHOLD = 5
const ephemeralStorageOverheadSize = "50M"
const (
VirtLauncherMonitorOverhead = "25Mi" // The `ps` RSS for virt-launcher-monitor
VirtLauncherOverhead = "100Mi" // The `ps` RSS for the virt-launcher process
VirtlogdOverhead = "25Mi" // The `ps` RSS for virtlogd
VirtqemudOverhead = "40Mi" // The `ps` RSS for virtqemud
QemuOverhead = "30Mi" // The `ps` RSS for qemu, minus the RAM of its (stressed) guest, minus the virtual page table
// Default: limits.memory = 2*requests.memory
DefaultMemoryLimitOverheadRatio = float64(2.0)
FailedToRenderLaunchManifestErrFormat = "failed to render launch manifest: %v"
)
type netBindingPluginMemoryCalculator interface {
Calculate(vmi *v1.VirtualMachineInstance, registeredPlugins map[string]v1.InterfaceBindingPlugin) resource.Quantity
}
type annotationsGenerator interface {
Generate(vmi *v1.VirtualMachineInstance) (map[string]string, error)
}
type targetAnnotationsGenerator interface {
GenerateFromSource(vmi *v1.VirtualMachineInstance, sourcePod *k8sv1.Pod) (map[string]string, error)
}
type TemplateService struct {
launcherImage string
exporterImage string
launcherQemuTimeout int
virtShareDir string
ephemeralDiskDir string
containerDiskDir string
hotplugDiskDir string
imagePullSecret string
persistentVolumeClaimStore cache.Store
virtClient kubecli.KubevirtClient
clusterConfig *virtconfig.ClusterConfig
launcherSubGid int64
resourceQuotaStore cache.Store
namespaceStore cache.Store
sidecarCreators []SidecarCreatorFunc
netBindingPluginMemoryCalculator netBindingPluginMemoryCalculator
annotationsGenerators []annotationsGenerator
netTargetAnnotationsGenerator targetAnnotationsGenerator
}
func isFeatureStateEnabled(fs *v1.FeatureState) bool {
return fs != nil && fs.Enabled != nil && *fs.Enabled
}
func setNodeAffinityForPod(vmi *v1.VirtualMachineInstance, pod *k8sv1.Pod) {
setNodeAffinityForHostModelCpuModel(vmi, pod)
setNodeAffinityForbiddenFeaturePolicy(vmi, pod)
}
func setNodeAffinityForHostModelCpuModel(vmi *v1.VirtualMachineInstance, pod *k8sv1.Pod) {
if vmi.Spec.Domain.CPU == nil || vmi.Spec.Domain.CPU.Model == "" || vmi.Spec.Domain.CPU.Model == v1.CPUModeHostModel {
pod.Spec.Affinity = modifyNodeAffintyToRejectLabel(pod.Spec.Affinity, v1.NodeHostModelIsObsoleteLabel)
}
}
func setNodeAffinityForbiddenFeaturePolicy(vmi *v1.VirtualMachineInstance, pod *k8sv1.Pod) {
if vmi.Spec.Domain.CPU == nil || vmi.Spec.Domain.CPU.Features == nil {
return
}
for _, feature := range vmi.Spec.Domain.CPU.Features {
if feature.Policy == "forbid" {
pod.Spec.Affinity = modifyNodeAffintyToRejectLabel(pod.Spec.Affinity, v1.CPUFeatureLabel+feature.Name)
}
}
}
func modifyNodeAffintyToRejectLabel(origAffinity *k8sv1.Affinity, labelToReject string) *k8sv1.Affinity {
affinity := origAffinity.DeepCopy()
requirement := k8sv1.NodeSelectorRequirement{
Key: labelToReject,
Operator: k8sv1.NodeSelectorOpDoesNotExist,
}
term := k8sv1.NodeSelectorTerm{
MatchExpressions: []k8sv1.NodeSelectorRequirement{requirement}}
nodeAffinity := &k8sv1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &k8sv1.NodeSelector{
NodeSelectorTerms: []k8sv1.NodeSelectorTerm{term},
},
}
if affinity != nil && affinity.NodeAffinity != nil {
if affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
terms := affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
// Since NodeSelectorTerms are ORed, the anti-affinity requirement is added to each term.
for i, selectorTerm := range terms {
affinity.NodeAffinity.
RequiredDuringSchedulingIgnoredDuringExecution.
NodeSelectorTerms[i].MatchExpressions = append(selectorTerm.MatchExpressions, requirement)
}
} else {
affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &k8sv1.NodeSelector{
NodeSelectorTerms: []k8sv1.NodeSelectorTerm{term},
}
}
} else if affinity != nil {
affinity.NodeAffinity = nodeAffinity
} else {
affinity = &k8sv1.Affinity{
NodeAffinity: nodeAffinity,
}
}
return affinity
}
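// Sketch of the affinity produced for a rejected label (illustrative YAML,
// assuming no pre-existing affinity):
//
//	nodeAffinity:
//	  requiredDuringSchedulingIgnoredDuringExecution:
//	    nodeSelectorTerms:
//	    - matchExpressions:
//	      - key: <labelToReject>
//	        operator: DoesNotExist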
func sysprepVolumeSource(sysprepVolume v1.SysprepSource) (k8sv1.VolumeSource, error) {
logger := log.DefaultLogger()
if sysprepVolume.Secret != nil {
return k8sv1.VolumeSource{
Secret: &k8sv1.SecretVolumeSource{
SecretName: sysprepVolume.Secret.Name,
},
}, nil
} else if sysprepVolume.ConfigMap != nil {
return k8sv1.VolumeSource{
ConfigMap: &k8sv1.ConfigMapVolumeSource{
LocalObjectReference: k8sv1.LocalObjectReference{
Name: sysprepVolume.ConfigMap.Name,
},
},
}, nil
}
errorStr := fmt.Sprintf("Sysprep must have Secret or ConfigMap reference set %v", sysprepVolume)
logger.Errorf("%s", errorStr)
return k8sv1.VolumeSource{}, fmt.Errorf("%s", errorStr)
}
func (t *TemplateService) GetLauncherImage() string {
return t.launcherImage
}
func (t *TemplateService) RenderLaunchManifestNoVm(vmi *v1.VirtualMachineInstance) (*k8sv1.Pod, error) {
backendStoragePVCName := ""
if backendstorage.IsBackendStorageNeeded(vmi) {
backendStoragePVC := backendstorage.PVCForVMI(t.persistentVolumeClaimStore, vmi)
if backendStoragePVC == nil {
return nil, fmt.Errorf("can't generate manifest without backend-storage PVC, waiting for the PVC to be created")
}
backendStoragePVCName = backendStoragePVC.Name
}
return t.renderLaunchManifest(vmi, nil, backendStoragePVCName, true)
}
func (t *TemplateService) RenderMigrationManifest(vmi *v1.VirtualMachineInstance, migration *v1.VirtualMachineInstanceMigration, sourcePod *k8sv1.Pod) (*k8sv1.Pod, error) {
reproducibleImageIDs, err := containerdisk.ExtractImageIDsFromSourcePod(vmi, sourcePod, t.clusterConfig.ImageVolumeEnabled())
if err != nil {
return nil, fmt.Errorf("can not proceed with the migration when no reproducible image digest can be detected: %v", err)
}
backendStoragePVCName := ""
if backendstorage.IsBackendStorageNeeded(vmi) {
backendStoragePVC := backendstorage.PVCForMigrationTarget(t.persistentVolumeClaimStore, migration)
if backendStoragePVC == nil {
return nil, fmt.Errorf("can't generate manifest without backend-storage PVC, waiting for the PVC to be created")
}
backendStoragePVCName = backendStoragePVC.Name
}
targetPod, err := t.renderLaunchManifest(vmi, reproducibleImageIDs, backendStoragePVCName, false)
if err != nil {
return nil, err
}
if t.netTargetAnnotationsGenerator != nil {
netAnnotations, err := t.netTargetAnnotationsGenerator.GenerateFromSource(vmi, sourcePod)
if err != nil {
return nil, err
}
maps.Copy(targetPod.Annotations, netAnnotations)
}
return targetPod, err
}
func (t *TemplateService) RenderLaunchManifest(vmi *v1.VirtualMachineInstance) (*k8sv1.Pod, error) {
backendStoragePVCName := ""
if backendstorage.IsBackendStorageNeeded(vmi) {
backendStoragePVC := backendstorage.PVCForVMI(t.persistentVolumeClaimStore, vmi)
if backendStoragePVC == nil {
return nil, fmt.Errorf("can't generate manifest without backend-storage PVC, waiting for the PVC to be created")
}
backendStoragePVCName = backendStoragePVC.Name
}
return t.renderLaunchManifest(vmi, nil, backendStoragePVCName, false)
}
func generateQemuTimeoutWithJitter(qemuTimeoutBaseSeconds int) string {
timeout := rand.Intn(qemuTimeoutJitterRange) + qemuTimeoutBaseSeconds
return fmt.Sprintf("%ds", timeout)
}
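// Example (illustrative; the value of qemuTimeoutJitterRange is defined
// elsewhere in this package): with a base of 240s and a jitter range of 120,
// rand.Intn yields 0..119, so the rendered timeout falls in [240s, 359s],
// e.g. "287s". The jitter spreads QEMU startup timeouts so that launchers
// started in a burst don't all expire at the same moment.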
func computePodSecurityContext(vmi *v1.VirtualMachineInstance, seccomp *k8sv1.SeccompProfile) *k8sv1.PodSecurityContext {
psc := &k8sv1.PodSecurityContext{}
// The virtiofs container runs unprivileged even if the pod runs as root,
// so FSGroup must allow the NonRootUID user (virtiofsd) to write into the PVC.
psc.FSGroup = pointer.P(int64(util.NonRootUID))
if util.IsNonRootVMI(vmi) {
nonRootUser := int64(util.NonRootUID)
psc.RunAsUser = &nonRootUser
psc.RunAsGroup = &nonRootUser
psc.RunAsNonRoot = pointer.P(true)
} else {
rootUser := int64(util.RootUser)
psc.RunAsUser = &rootUser
}
psc.SeccompProfile = seccomp
return psc
}
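// Illustrative sketch (assumption: util.NonRootUID is the non-root launcher
// UID, 107 in current KubeVirt trees): for a non-root VMI the pod security
// context ends up as FSGroup=107, RunAsUser=107, RunAsGroup=107,
// RunAsNonRoot=true, while a root VMI keeps RunAsUser=0 and only FSGroup is
// forced to the non-root UID for virtiofsd.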
func (t *TemplateService) renderLaunchManifest(vmi *v1.VirtualMachineInstance, imageIDs map[string]string, backendStoragePVCName string, tempPod bool) (*k8sv1.Pod, error) {
precond.MustNotBeNil(vmi)
domain := precond.MustNotBeEmpty(vmi.GetObjectMeta().GetName())
namespace := precond.MustNotBeEmpty(vmi.GetObjectMeta().GetNamespace())
var userId int64 = util.RootUser
nonRoot := util.IsNonRootVMI(vmi)
if nonRoot {
userId = util.NonRootUID
}
// Pad the virt-launcher grace period.
// Ideally we want virt-handler to tear down the vmi
// before virt-launcher's termination forces it down.
const gracePeriodPaddingSeconds int64 = 15
gracePeriodSeconds := gracePeriodInSeconds(vmi) + gracePeriodPaddingSeconds
gracePeriodKillAfter := gracePeriodSeconds + gracePeriodPaddingSeconds
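// Worked example (illustrative): with the default VMI grace period of 30s,
// gracePeriodSeconds becomes 30+15=45 and gracePeriodKillAfter 45+15=60; the
// latter is what lands in the pod's TerminationGracePeriodSeconds below.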
imagePullSecrets := imgPullSecrets(vmi.Spec.Volumes...)
if util.HasKernelBootContainerImage(vmi) && vmi.Spec.Domain.Firmware.KernelBoot.Container.ImagePullSecret != "" {
imagePullSecrets = appendUniqueImagePullSecret(imagePullSecrets, k8sv1.LocalObjectReference{
Name: vmi.Spec.Domain.Firmware.KernelBoot.Container.ImagePullSecret,
})
}
if t.imagePullSecret != "" {
imagePullSecrets = appendUniqueImagePullSecret(imagePullSecrets, k8sv1.LocalObjectReference{
Name: t.imagePullSecret,
})
}
networkToResourceMap, err := multus.NetworkToResource(t.virtClient, vmi)
if err != nil {
return nil, err
}
resourceRenderer, err := t.newResourceRenderer(vmi, networkToResourceMap)
if err != nil {
return nil, err
}
resources := resourceRenderer.ResourceRequirements()
ovmfPath := t.clusterConfig.GetOVMFPath(vmi.Spec.Architecture)
var requestedHookSidecarList hooks.HookSidecarList
for _, sidecarCreator := range t.sidecarCreators {
sidecars, err := sidecarCreator(vmi, t.clusterConfig.GetConfig())
if err != nil {
return nil, err
}
requestedHookSidecarList = append(requestedHookSidecarList, sidecars...)
}
var command []string
if tempPod {
logger := log.DefaultLogger()
logger.Infof("RUNNING doppleganger pod for %s", vmi.Name)
command = []string{"/bin/bash",
"-c",
"echo", "bound PVCs"}
} else {
command = []string{"/usr/bin/virt-launcher-monitor",
"--qemu-timeout", generateQemuTimeoutWithJitter(t.launcherQemuTimeout),
"--name", domain,
"--uid", string(vmi.UID),
"--namespace", namespace,
"--kubevirt-share-dir", t.virtShareDir,
"--ephemeral-disk-dir", t.ephemeralDiskDir,
"--container-disk-dir", t.containerDiskDir,
"--grace-period-seconds", strconv.Itoa(int(gracePeriodSeconds)),
"--hook-sidecars", strconv.Itoa(len(requestedHookSidecarList)),
"--ovmf-path", ovmfPath,
"--disk-memory-limit", strconv.Itoa(int(t.clusterConfig.GetDiskVerification().MemoryLimit.Value())),
}
if nonRoot {
command = append(command, "--run-as-nonroot")
}
if t.clusterConfig.ImageVolumeEnabled() {
command = append(command, "--image-volume")
}
if customDebugFilters, exists := vmi.Annotations[v1.CustomLibvirtLogFiltersAnnotation]; exists {
log.Log.Object(vmi).Infof("Applying custom debug filters for vmi %s: %s", vmi.Name, customDebugFilters)
command = append(command, "--libvirt-log-filters", customDebugFilters)
}
}
if t.clusterConfig.AllowEmulation() {
command = append(command, "--allow-emulation")
}
if checkForKeepLauncherAfterFailure(vmi) {
command = append(command, "--keep-after-failure")
}
_, ok := vmi.Annotations[v1.FuncTestLauncherFailFastAnnotation]
if ok {
command = append(command, "--simulate-crash")
}
volumeRenderer, err := t.newVolumeRenderer(vmi, imageIDs, namespace, requestedHookSidecarList, backendStoragePVCName)
if err != nil {
return nil, err
}
compute := t.newContainerSpecRenderer(vmi, volumeRenderer, resources, userId).Render(command)
virtLauncherLogVerbosity := t.clusterConfig.GetVirtLauncherVerbosity()
if verbosity, isSet := vmi.Labels[logVerbosity]; isSet || virtLauncherLogVerbosity != virtconfig.DefaultVirtLauncherLogVerbosity {
// Override the cluster-wide verbosity level if a specific value has been provided for this VMI
verbosityStr := fmt.Sprint(virtLauncherLogVerbosity)
if isSet {
verbosityStr = verbosity
verbosityInt, err := strconv.Atoi(verbosity)
if err != nil {
return nil, fmt.Errorf("verbosity %s cannot cast to int: %v", verbosity, err)
}
virtLauncherLogVerbosity = uint(verbosityInt)
}
compute.Env = append(compute.Env, k8sv1.EnvVar{Name: ENV_VAR_VIRT_LAUNCHER_LOG_VERBOSITY, Value: verbosityStr})
}
if labelValue, ok := vmi.Labels[debugLogs]; (ok && strings.EqualFold(labelValue, "true")) || virtLauncherLogVerbosity > EXT_LOG_VERBOSITY_THRESHOLD {
compute.Env = append(compute.Env, k8sv1.EnvVar{Name: ENV_VAR_LIBVIRT_DEBUG_LOGS, Value: "1"})
}
if labelValue, ok := vmi.Labels[virtiofsDebugLogs]; (ok && strings.EqualFold(labelValue, "true")) || virtLauncherLogVerbosity > EXT_LOG_VERBOSITY_THRESHOLD {
compute.Env = append(compute.Env, k8sv1.EnvVar{Name: ENV_VAR_VIRTIOFSD_DEBUG_LOGS, Value: "1"})
}
compute.Env = append(compute.Env, k8sv1.EnvVar{
Name: ENV_VAR_POD_NAME,
ValueFrom: &k8sv1.EnvVarSource{
FieldRef: &k8sv1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
})
// Make sure the compute container is always first: the mutating webhook
// shipped with the SR-IOV operator adds the requested resources to the first container of the list
containers := []k8sv1.Container{compute}
if !t.clusterConfig.ImageVolumeEnabled() {
containersDisks := containerdisk.GenerateContainers(vmi, t.clusterConfig, imageIDs, containerDisks, virtBinDir)
containers = append(containers, containersDisks...)
kernelBootContainer := containerdisk.GenerateKernelBootContainer(vmi, t.clusterConfig, imageIDs, containerDisks, virtBinDir)
if kernelBootContainer != nil {
log.Log.Object(vmi).Infof("kernel boot container generated")
containers = append(containers, *kernelBootContainer)
}
}
virtiofsContainers := generateVirtioFSContainers(vmi, t.launcherImage, t.clusterConfig)
if virtiofsContainers != nil {
containers = append(containers, virtiofsContainers...)
}
var sidecarVolumes []k8sv1.Volume
for i, requestedHookSidecar := range requestedHookSidecarList {
sidecarContainer := newSidecarContainerRenderer(
sidecarContainerName(i), vmi, sidecarResources(vmi, t.clusterConfig), requestedHookSidecar, userId).Render(requestedHookSidecar.Command)
if requestedHookSidecar.ConfigMap != nil {
cm, err := t.virtClient.CoreV1().ConfigMaps(vmi.Namespace).Get(context.TODO(), requestedHookSidecar.ConfigMap.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
volumeSource := k8sv1.VolumeSource{
ConfigMap: &k8sv1.ConfigMapVolumeSource{
LocalObjectReference: k8sv1.LocalObjectReference{Name: cm.Name},
DefaultMode: pointer.P(int32(0755)),
},
}
vol := k8sv1.Volume{
Name: cm.Name,
VolumeSource: volumeSource,
}
sidecarVolumes = append(sidecarVolumes, vol)
}
if requestedHookSidecar.PVC != nil {
volumeSource := k8sv1.VolumeSource{
PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{
ClaimName: requestedHookSidecar.PVC.Name,
},
}
vol := k8sv1.Volume{
Name: requestedHookSidecar.PVC.Name,
VolumeSource: volumeSource,
}
sidecarVolumes = append(sidecarVolumes, vol)
if requestedHookSidecar.PVC.SharedComputePath != "" {
containers[0].VolumeMounts = append(containers[0].VolumeMounts,
k8sv1.VolumeMount{
Name: requestedHookSidecar.PVC.Name,
MountPath: requestedHookSidecar.PVC.SharedComputePath,
})
}
}
containers = append(containers, sidecarContainer)
}
podAnnotations, err := t.generatePodAnnotations(vmi)
if err != nil {
return nil, err
}
if tempPod {
// mark pod as temp - only used for provisioning
podAnnotations[v1.EphemeralProvisioningObject] = "true"
}
var initContainers []k8sv1.Container
sconsolelogContainer := generateSerialConsoleLogContainer(vmi, t.launcherImage, t.clusterConfig, virtLauncherLogVerbosity)
if sconsolelogContainer != nil {
initContainers = append(initContainers, *sconsolelogContainer)
}
if !t.clusterConfig.ImageVolumeEnabled() && (HaveContainerDiskVolume(vmi.Spec.Volumes) || util.HasKernelBootContainerImage(vmi)) {
initContainerCommand := []string{"/usr/bin/cp", "--preserve=all",
"/usr/bin/container-disk",
"/init/usr/bin/container-disk",
}
initContainers = append(
initContainers,
t.newInitContainerRenderer(vmi,
initContainerVolumeMount(),
initContainerResourceRequirementsForVMI(vmi, v1.ContainerDisk, t.clusterConfig),
userId).Render(initContainerCommand))
// This causes containerDisks to be pre-pulled before virt-launcher starts.
initContainers = append(initContainers, containerdisk.GenerateInitContainers(vmi, t.clusterConfig, imageIDs, containerDisks, virtBinDir)...)
kernelBootInitContainer := containerdisk.GenerateKernelBootInitContainer(vmi, t.clusterConfig, imageIDs, containerDisks, virtBinDir)
if kernelBootInitContainer != nil {
initContainers = append(initContainers, *kernelBootInitContainer)
}
} else if t.clusterConfig.ImageVolumeEnabled() {
// TODO: Once the KEP https://github.com/kubernetes/enhancements/pull/5375 is fully implemented and stable
// in all Kubernetes versions supported by KubeVirt, this entire init containers logic should be removed,
// and the digest can be fetched directly from the Pod volume status.
// Generate init containers for regular volumes
for _, volume := range vmi.Spec.Volumes {
containerDiskImageIDAlreadyExists := strings.Contains(imageIDs[volume.Name], "@sha256:")
if volume.ContainerDisk == nil || containerDiskImageIDAlreadyExists {
continue
}
initContainer := containerdisk.CreateImageVolumeInitContainer(
vmi,
t.clusterConfig,
volume.Name,
volume.ContainerDisk.Image,
volume.ContainerDisk.ImagePullPolicy,
)
initContainers = append(initContainers, initContainer)
}
// Generate init container for kernel boot if needed
kernelBootImageIDAlreadyExists := strings.Contains(imageIDs[containerdisk.KernelBootVolumeName], "@sha256:")
if util.HasKernelBootContainerImage(vmi) && !kernelBootImageIDAlreadyExists {
kernelBootContainer := vmi.Spec.Domain.Firmware.KernelBoot.Container
initContainer := containerdisk.CreateImageVolumeInitContainer(
vmi,
t.clusterConfig,
containerdisk.KernelBootVolumeName,
kernelBootContainer.Image,
kernelBootContainer.ImagePullPolicy,
)
initContainers = append(initContainers, initContainer)
}
}
hostName := dns.SanitizeHostname(vmi)
enableServiceLinks := false
var podSeccompProfile *k8sv1.SeccompProfile = nil
if seccompConf := t.clusterConfig.GetConfig().SeccompConfiguration; seccompConf != nil && seccompConf.VirtualMachineInstanceProfile != nil {
vmProfile := seccompConf.VirtualMachineInstanceProfile
if customProfile := vmProfile.CustomProfile; customProfile != nil {
if customProfile.LocalhostProfile != nil {
podSeccompProfile = &k8sv1.SeccompProfile{
Type: k8sv1.SeccompProfileTypeLocalhost,
LocalhostProfile: customProfile.LocalhostProfile,
}
} else if customProfile.RuntimeDefaultProfile {
podSeccompProfile = &k8sv1.SeccompProfile{
Type: k8sv1.SeccompProfileTypeRuntimeDefault,
}
}
}
}
pod := k8sv1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "virt-launcher-" + domain + "-",
Labels: podLabels(vmi, hostName),
Annotations: podAnnotations,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(vmi, v1.VirtualMachineInstanceGroupVersionKind),
},
},
Spec: k8sv1.PodSpec{
Hostname: hostName,
Subdomain: vmi.Spec.Subdomain,
SecurityContext: computePodSecurityContext(vmi, podSeccompProfile),
TerminationGracePeriodSeconds: &gracePeriodKillAfter,
RestartPolicy: k8sv1.RestartPolicyNever,
Containers: containers,
InitContainers: initContainers,
NodeSelector: t.newNodeSelectorRenderer(vmi).Render(),
Volumes: volumeRenderer.Volumes(),
ImagePullSecrets: imagePullSecrets,
DNSConfig: vmi.Spec.DNSConfig,
DNSPolicy: vmi.Spec.DNSPolicy,
ReadinessGates: readinessGates(),
EnableServiceLinks: &enableServiceLinks,
SchedulerName: vmi.Spec.SchedulerName,
Tolerations: vmi.Spec.Tolerations,
TopologySpreadConstraints: vmi.Spec.TopologySpreadConstraints,
ResourceClaims: vmi.Spec.ResourceClaims,
},
}
alignPodMultiCategorySecurity(&pod, t.clusterConfig.GetSELinuxLauncherType(), t.clusterConfig.DockerSELinuxMCSWorkaroundEnabled())
// If we have a runtime class specified, use it, otherwise don't set a runtimeClassName
runtimeClassName := t.clusterConfig.GetDefaultRuntimeClass()
if runtimeClassName != "" {
pod.Spec.RuntimeClassName = &runtimeClassName
}
if vmi.Spec.PriorityClassName != "" {
pod.Spec.PriorityClassName = vmi.Spec.PriorityClassName
}
if vmi.Spec.Affinity != nil {
pod.Spec.Affinity = vmi.Spec.Affinity.DeepCopy()
}
setNodeAffinityForPod(vmi, &pod)
serviceAccountName := serviceAccount(vmi.Spec.Volumes...)
if len(serviceAccountName) > 0 {
pod.Spec.ServiceAccountName = serviceAccountName
automount := true
pod.Spec.AutomountServiceAccountToken = &automount
} else if istio.ProxyInjectionEnabled(vmi) {
automount := true
pod.Spec.AutomountServiceAccountToken = &automount
} else {
automount := false
pod.Spec.AutomountServiceAccountToken = &automount
}
pod.Spec.Volumes = append(pod.Spec.Volumes, sidecarVolumes...)
return &pod, nil
}
func (t *TemplateService) newNodeSelectorRenderer(vmi *v1.VirtualMachineInstance) *NodeSelectorRenderer {
var opts []NodeSelectorRendererOption
if vmi.IsCPUDedicated() {
opts = append(opts, WithDedicatedCPU())
}
if t.clusterConfig.HypervStrictCheckEnabled() {
opts = append(opts, WithHyperv(vmi.Spec.Domain.Features))
}
if modelLabel, err := CPUModelLabelFromCPUModel(vmi); err == nil {
opts = append(
opts,
WithModelAndFeatureLabels(modelLabel, CPUFeatureLabelsFromCPUFeatures(vmi)...),
)
}
var machineType string
if vmi.Status.Machine != nil && vmi.Status.Machine.Type != "" {
machineType = vmi.Status.Machine.Type
} else if vmi.Spec.Domain.Machine != nil && vmi.Spec.Domain.Machine.Type != "" {
machineType = vmi.Spec.Domain.Machine.Type
}
if machineType != "" {
opts = append(opts, WithMachineType(machineType))
}
if topology.IsManualTSCFrequencyRequired(vmi) {
opts = append(opts, WithTSCTimer(vmi.Status.TopologyHints.TSCFrequency))
}
if vmi.IsRealtimeEnabled() {
log.Log.V(4).Info("Add realtime node label selector")
opts = append(opts, WithRealtime())
}
if util.IsSEVVMI(vmi) {
log.Log.V(4).Info("Add SEV node label selector")
opts = append(opts, WithSEVSelector())
}
if util.IsSEVESVMI(vmi) {
log.Log.V(4).Info("Add SEV-ES node label selector")
opts = append(opts, WithSEVESSelector())
}
if util.IsSEVSNPVMI(vmi) {
log.Log.V(4).Info("Add SEV-SNP node label selector")
opts = append(opts, WithSEVSNPSelector())
}
if util.IsSecureExecutionVMI(vmi) {
log.Log.V(4).Info("Add Secure Execution node label selector")
opts = append(opts, WithSecureExecutionSelector())
}
if util.IsTDXVMI(vmi) {
log.Log.V(4).Info("Add TDX node label selector")
opts = append(opts, WithTDXSelector())
}
return NewNodeSelectorRenderer(
vmi.Spec.NodeSelector,
t.clusterConfig.GetNodeSelectors(),
vmi.Spec.Architecture,
opts...,
)
}
func initContainerVolumeMount() k8sv1.VolumeMount {
return k8sv1.VolumeMount{
Name: virtBinDir,
MountPath: "/init/usr/bin",
}
}
func newSidecarContainerRenderer(sidecarName string, vmiSpec *v1.VirtualMachineInstance, resources k8sv1.ResourceRequirements, requestedHookSidecar hooks.HookSidecar, userId int64) *ContainerSpecRenderer {
sidecarOpts := []Option{
WithResourceRequirements(resources),
WithArgs(requestedHookSidecar.Args),
WithExtraEnvVars([]k8sv1.EnvVar{
k8sv1.EnvVar{
Name: hooks.ContainerNameEnvVar,
Value: sidecarName,
}}),
}
var mounts []k8sv1.VolumeMount
mounts = append(mounts, sidecarVolumeMount(sidecarName))
if requestedHookSidecar.DownwardAPI == v1.DeviceInfo {
mounts = append(mounts, mountPath(downwardapi.NetworkInfoVolumeName, downwardapi.MountPath))
}
if requestedHookSidecar.ConfigMap != nil {
mounts = append(mounts, configMapVolumeMount(*requestedHookSidecar.ConfigMap))
}
if requestedHookSidecar.PVC != nil {
mounts = append(mounts, pvcVolumeMount(*requestedHookSidecar.PVC))
}
sidecarOpts = append(sidecarOpts, WithVolumeMounts(mounts...))
if util.IsNonRootVMI(vmiSpec) {
sidecarOpts = append(sidecarOpts, WithNonRoot(userId))
sidecarOpts = append(sidecarOpts, WithDropALLCapabilities())
}
if requestedHookSidecar.Image == "" {
requestedHookSidecar.Image = os.Getenv(operatorutil.SidecarShimImageEnvName)
}
return NewContainerSpecRenderer(
sidecarName,
requestedHookSidecar.Image,
requestedHookSidecar.ImagePullPolicy,
sidecarOpts...)
}
func (t *TemplateService) newInitContainerRenderer(vmiSpec *v1.VirtualMachineInstance, initContainerVolumeMount k8sv1.VolumeMount, initContainerResources k8sv1.ResourceRequirements, userId int64) *ContainerSpecRenderer {
const containerDisk = "container-disk-binary"
cpInitContainerOpts := []Option{
WithVolumeMounts(initContainerVolumeMount),
WithResourceRequirements(initContainerResources),
WithNoCapabilities(),
}
if util.IsNonRootVMI(vmiSpec) {
cpInitContainerOpts = append(cpInitContainerOpts, WithNonRoot(userId))
}
return NewContainerSpecRenderer(containerDisk, t.launcherImage, t.clusterConfig.GetImagePullPolicy(), cpInitContainerOpts...)
}
func (t *TemplateService) newContainerSpecRenderer(vmi *v1.VirtualMachineInstance, volumeRenderer *VolumeRenderer, resources k8sv1.ResourceRequirements, userId int64) *ContainerSpecRenderer {
computeContainerOpts := []Option{
WithVolumeDevices(volumeRenderer.VolumeDevices()...),
WithVolumeMounts(volumeRenderer.Mounts()...),
WithSharedFilesystems(volumeRenderer.SharedFilesystemPaths()...),
WithResourceRequirements(resources),
WithPorts(vmi),
WithCapabilities(vmi),
}
if util.IsNonRootVMI(vmi) {
computeContainerOpts = append(computeContainerOpts, WithNonRoot(userId))
computeContainerOpts = append(computeContainerOpts, WithDropALLCapabilities())
}
if vmi.Spec.ReadinessProbe != nil {
computeContainerOpts = append(computeContainerOpts, WithReadinessProbe(vmi))
}
if vmi.Spec.LivenessProbe != nil {
computeContainerOpts = append(computeContainerOpts, WithLivelinessProbe(vmi))
}
const computeContainerName = "compute"
containerRenderer := NewContainerSpecRenderer(
computeContainerName, t.launcherImage, t.clusterConfig.GetImagePullPolicy(), computeContainerOpts...)
return containerRenderer
}
func (t *TemplateService) newVolumeRenderer(vmi *v1.VirtualMachineInstance, imageIDs map[string]string, namespace string, requestedHookSidecarList hooks.HookSidecarList, backendStoragePVCName string) (*VolumeRenderer, error) {
imageVolumeFeatureGateEnabled := t.clusterConfig.ImageVolumeEnabled()
volumeOpts := []VolumeRendererOption{
withVMIConfigVolumes(vmi.Spec.Domain.Devices.Disks, vmi.Spec.Volumes),
withVMIVolumes(t.persistentVolumeClaimStore, vmi.Spec.Volumes, vmi.Status.VolumeStatus),
withAccessCredentials(vmi.Spec.AccessCredentials),
withBackendStorage(vmi, backendStoragePVCName),
}
if imageVolumeFeatureGateEnabled {
volumeOpts = append(volumeOpts, withImageVolumes(vmi))
}
if len(requestedHookSidecarList) != 0 {
volumeOpts = append(volumeOpts, withSidecarVolumes(requestedHookSidecarList))
}
if hasHugePages(vmi) {
volumeOpts = append(volumeOpts, withHugepages())
}
if !vmi.Spec.Domain.Devices.DisableHotplug {
volumeOpts = append(volumeOpts, withHotplugSupport(t.hotplugDiskDir))
}
if vmispec.BindingPluginNetworkWithDeviceInfoExist(vmi.Spec.Domain.Devices.Interfaces, t.clusterConfig.GetNetworkBindings()) ||
vmispec.SRIOVInterfaceExist(vmi.Spec.Domain.Devices.Interfaces) {
volumeOpts = append(volumeOpts, func(renderer *VolumeRenderer) error {
renderer.podVolumeMounts = append(renderer.podVolumeMounts, mountPath(downwardapi.NetworkInfoVolumeName, downwardapi.MountPath))
return nil
})
volumeOpts = append(volumeOpts, withNetworkDeviceInfoMapAnnotation())
}
if util.IsVMIVirtiofsEnabled(vmi) {
volumeOpts = append(volumeOpts, withVirioFS())
}
volumeRenderer, err := NewVolumeRenderer(
t.clusterConfig,
imageVolumeFeatureGateEnabled,
t.launcherImage,
imageIDs,
namespace,
t.ephemeralDiskDir,
t.containerDiskDir,
t.virtShareDir,
volumeOpts...)
if err != nil {
return nil, err
}
return volumeRenderer, nil
}
func (t *TemplateService) newResourceRenderer(vmi *v1.VirtualMachineInstance, networkToResourceMap map[string]string) (*ResourceRenderer, error) {
vmiResources := vmi.Spec.Domain.Resources
baseOptions := []ResourceRendererOption{
WithEphemeralStorageRequest(),
WithVirtualizationResources(getRequiredResources(vmi, t.clusterConfig.AllowEmulation())),
}
if err := validatePermittedHostDevices(&vmi.Spec, t.clusterConfig); err != nil {
return nil, err
}
options := append(baseOptions, t.VMIResourcePredicates(vmi, networkToResourceMap).Apply()...)
return NewResourceRenderer(vmiResources.Limits, vmiResources.Requests, options...), nil
}
func sidecarVolumeMount(containerName string) k8sv1.VolumeMount {
return k8sv1.VolumeMount{
Name: hookSidecarSocks,
MountPath: hooks.HookSocketsSharedDirectory,
SubPath: containerName,
}
}
func configMapVolumeMount(v hooks.ConfigMap) k8sv1.VolumeMount {
return k8sv1.VolumeMount{
Name: v.Name,
MountPath: v.HookPath,
SubPath: v.Key,
}
}
func pvcVolumeMount(v hooks.PVC) k8sv1.VolumeMount {
return k8sv1.VolumeMount{
Name: v.Name,
MountPath: v.VolumePath,
}
}
func gracePeriodInSeconds(vmi *v1.VirtualMachineInstance) int64 {
if vmi.Spec.TerminationGracePeriodSeconds != nil {
return *vmi.Spec.TerminationGracePeriodSeconds
}
return v1.DefaultGracePeriodSeconds
}
func sidecarContainerName(i int) string {
return fmt.Sprintf("hook-sidecar-%d", i)
}
func (t *TemplateService) RenderHotplugAttachmentPodTemplate(volumes []*v1.Volume, ownerPod *k8sv1.Pod, vmi *v1.VirtualMachineInstance, claimMap map[string]*k8sv1.PersistentVolumeClaim) (*k8sv1.Pod, error) {
zero := int64(0)
runUser := int64(util.NonRootUID)
sharedMount := k8sv1.MountPropagationHostToContainer
command := []string{"/bin/sh", "-c", "/usr/bin/container-disk --copy-path /path/hp"}
tmpTolerations := make([]k8sv1.Toleration, len(ownerPod.Spec.Tolerations))
copy(tmpTolerations, ownerPod.Spec.Tolerations)
pod := &k8sv1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "hp-volume-",
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(ownerPod, schema.GroupVersionKind{
Group: k8sv1.SchemeGroupVersion.Group,
Version: k8sv1.SchemeGroupVersion.Version,
Kind: "Pod",
}),
},
Labels: map[string]string{
v1.AppLabel: hotplugDisk,
},
},
Spec: k8sv1.PodSpec{
Containers: []k8sv1.Container{
{
Name: hotplugDisk,
Image: t.launcherImage,
Command: command,
Resources: hotplugContainerResourceRequirementsForVMI(t.clusterConfig),
SecurityContext: &k8sv1.SecurityContext{
AllowPrivilegeEscalation: pointer.P(false),
RunAsNonRoot: pointer.P(true),
RunAsUser: &runUser,
SeccompProfile: &k8sv1.SeccompProfile{
Type: k8sv1.SeccompProfileTypeRuntimeDefault,
},
Capabilities: &k8sv1.Capabilities{
Drop: []k8sv1.Capability{"ALL"},
},
SELinuxOptions: &k8sv1.SELinuxOptions{
// If SELinux is enabled on the host, this level will be adjusted below to match the level
// of its companion virt-launcher pod to allow it to consume our disk images.
Type: t.clusterConfig.GetSELinuxLauncherType(),
Level: "s0",
},
},
VolumeMounts: []k8sv1.VolumeMount{
{
Name: hotplugDisks,
MountPath: "/path",
MountPropagation: &sharedMount,
},
},
},
},
Affinity: &k8sv1.Affinity{
NodeAffinity: &k8sv1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &k8sv1.NodeSelector{
NodeSelectorTerms: []k8sv1.NodeSelectorTerm{
{
MatchExpressions: []k8sv1.NodeSelectorRequirement{
{
Key: k8sv1.LabelHostname,
Operator: k8sv1.NodeSelectorOpIn,
Values: []string{ownerPod.Spec.NodeName},
},
},
},
},
},
},
},
Tolerations: tmpTolerations,
Volumes: []k8sv1.Volume{emptyDirVolume(hotplugDisks)},
TerminationGracePeriodSeconds: &zero,
},
}
err := matchSELinuxLevelOfVMI(pod, vmi)
if err != nil {
return nil, err
}
hotplugVolumeStatusMap := make(map[string]v1.VolumePhase)
for _, status := range vmi.Status.VolumeStatus {
if status.HotplugVolume != nil {
hotplugVolumeStatusMap[status.Name] = status.Phase
}
}
for _, volume := range volumes {
claimName := types.PVCNameFromVirtVolume(volume)
if claimName == "" {
continue
}
skipMount := false
if hotplugVolumeStatusMap[volume.Name] == v1.VolumeReady || hotplugVolumeStatusMap[volume.Name] == v1.HotplugVolumeMounted {
skipMount = true
}
pod.Spec.Volumes = append(pod.Spec.Volumes, k8sv1.Volume{
Name: volume.Name,
VolumeSource: k8sv1.VolumeSource{
PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{
ClaimName: claimName,
},
},
})
pvc := claimMap[volume.Name]
if pvc == nil {
continue
}
if types.IsPVCBlock(pvc.Spec.VolumeMode) {
pod.Spec.Containers[0].VolumeDevices = append(pod.Spec.Containers[0].VolumeDevices, k8sv1.VolumeDevice{
Name: volume.Name,
DevicePath: fmt.Sprintf("/path/%s/%s", volume.Name, pvc.GetUID()),
})
} else {
if !skipMount {
pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, k8sv1.VolumeMount{
Name: volume.Name,
MountPath: fmt.Sprintf("/%s", volume.Name),
})
}
}
}
return pod, nil
}
func (t *TemplateService) RenderHotplugAttachmentTriggerPodTemplate(volume *v1.Volume, ownerPod *k8sv1.Pod, vmi *v1.VirtualMachineInstance, pvcName string, isBlock bool, tempPod bool) (*k8sv1.Pod, error) {
zero := int64(0)
runUser := int64(util.NonRootUID)
sharedMount := k8sv1.MountPropagationHostToContainer
var command []string
if tempPod {
command = []string{"/bin/bash",
"-c",
"exit", "0"}
} else {
command = []string{"/bin/sh", "-c", "/usr/bin/container-disk --copy-path /path/hp"}
}
annotationsList := make(map[string]string)
if tempPod {
// mark pod as temp - only used for provisioning
annotationsList[v1.EphemeralProvisioningObject] = "true"
}
tmpTolerations := make([]k8sv1.Toleration, len(ownerPod.Spec.Tolerations))
copy(tmpTolerations, ownerPod.Spec.Tolerations)
pod := &k8sv1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "hp-volume-",
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(ownerPod, schema.GroupVersionKind{
Group: k8sv1.SchemeGroupVersion.Group,
Version: k8sv1.SchemeGroupVersion.Version,
Kind: "Pod",
}),
},
Labels: map[string]string{
v1.AppLabel: hotplugDisk,
},
Annotations: annotationsList,
},
Spec: k8sv1.PodSpec{
Containers: []k8sv1.Container{
{
Name: hotplugDisk,
Image: t.launcherImage,
Command: command,
Resources: hotplugContainerResourceRequirementsForVMI(t.clusterConfig),
SecurityContext: &k8sv1.SecurityContext{
AllowPrivilegeEscalation: pointer.P(false),
RunAsNonRoot: pointer.P(true),
RunAsUser: &runUser,
SeccompProfile: &k8sv1.SeccompProfile{
Type: k8sv1.SeccompProfileTypeRuntimeDefault,
},
Capabilities: &k8sv1.Capabilities{
Drop: []k8sv1.Capability{"ALL"},
},
SELinuxOptions: &k8sv1.SELinuxOptions{
Level: "s0",
},
},
VolumeMounts: []k8sv1.VolumeMount{
{
Name: hotplugDisks,
MountPath: "/path",
MountPropagation: &sharedMount,
},
},
},
},
Affinity: &k8sv1.Affinity{
PodAffinity: &k8sv1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []k8sv1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchLabels: ownerPod.GetLabels(),
},
TopologyKey: k8sv1.LabelHostname,
},
},
},
},
Tolerations: tmpTolerations,
Volumes: []k8sv1.Volume{
{
Name: volume.Name,
VolumeSource: k8sv1.VolumeSource{
PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{
ClaimName: pvcName,
ReadOnly: false,
},
},
},
emptyDirVolume(hotplugDisks),
},
TerminationGracePeriodSeconds: &zero,
},
}
err := matchSELinuxLevelOfVMI(pod, vmi)
if err != nil {
return nil, err
}
if isBlock {
pod.Spec.Containers[0].VolumeDevices = []k8sv1.VolumeDevice{
{
Name: volume.Name,
DevicePath: "/dev/hotplugblockdevice",
},
}
pod.Spec.SecurityContext = &k8sv1.PodSecurityContext{
RunAsUser: &[]int64{0}[0],
}
} else {
pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, k8sv1.VolumeMount{
Name: volume.Name,
MountPath: "/pvc",
})
}
return pod, nil
}
func (t *TemplateService) RenderExporterManifest(vmExport *exportv1.VirtualMachineExport, namePrefix string) *k8sv1.Pod {
exporterPod := &k8sv1.Pod{
ObjectMeta: metav1.ObjectMeta{
// DNS1035LabelMaxLength is used here to align with
// VMExportController{}.getExportPodName
Name: naming.GetName(namePrefix, vmExport.Name, validation.DNS1035LabelMaxLength),
Namespace: vmExport.Namespace,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(vmExport, schema.GroupVersionKind{
Group: exportv1.SchemeGroupVersion.Group,
Version: exportv1.SchemeGroupVersion.Version,
Kind: "VirtualMachineExport",
}),
},
Labels: map[string]string{
v1.AppLabel: virtExporter,
},
},
Spec: k8sv1.PodSpec{
RestartPolicy: k8sv1.RestartPolicyNever,
Containers: []k8sv1.Container{
{
Name: "exporter",
Image: t.exporterImage,
ImagePullPolicy: t.clusterConfig.GetImagePullPolicy(),
Env: []k8sv1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &k8sv1.EnvVarSource{
FieldRef: &k8sv1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
},
SecurityContext: &k8sv1.SecurityContext{
AllowPrivilegeEscalation: pointer.P(false),
Capabilities: &k8sv1.Capabilities{Drop: []k8sv1.Capability{"ALL"}},
},
Resources: vmExportContainerResourceRequirements(t.clusterConfig),
},
},
},
}
return exporterPod
}
func appendUniqueImagePullSecret(secrets []k8sv1.LocalObjectReference, newsecret k8sv1.LocalObjectReference) []k8sv1.LocalObjectReference {
for _, oldsecret := range secrets {
if oldsecret == newsecret {
return secrets
}
}
return append(secrets, newsecret)
}
func addProbeOverheads(vmi *v1.VirtualMachineInstance, quantity *resource.Quantity) {
// We need to add this overhead due to potential issues when using exec probes.
// In certain situations, depending on factors like node size and kernel version,
// an exec probe can cause a significant memory overhead that results in the pod getting OOM-killed.
// To prevent this, we add this overhead until we have a better way of doing exec probes.
// The virtProbeTotalAdditionalOverhead is added once for the virt-probe binary we use
// for probing, while virtProbeOverhead is the general memory consumption of virt-probe
// that we add per configured probe.
virtProbeTotalAdditionalOverhead := resource.MustParse("100Mi")
virtProbeOverhead := resource.MustParse("10Mi")
hasLiveness := vmi.Spec.LivenessProbe != nil && vmi.Spec.LivenessProbe.Exec != nil
hasReadiness := vmi.Spec.ReadinessProbe != nil && vmi.Spec.ReadinessProbe.Exec != nil
if hasLiveness {
quantity.Add(virtProbeOverhead)
}
if hasReadiness {
quantity.Add(virtProbeOverhead)
}
if hasLiveness || hasReadiness {
quantity.Add(virtProbeTotalAdditionalOverhead)
}
}
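// Worked example (illustrative): a VMI defining both an exec liveness and an
// exec readiness probe gets 10Mi + 10Mi + 100Mi = 120Mi added to the given
// quantity; a single exec probe yields 110Mi; HTTP and TCP probes add nothing,
// since only Exec probes trigger the overhead.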
func HaveContainerDiskVolume(volumes []v1.Volume) bool {
for _, volume := range volumes {
if volume.ContainerDisk != nil {
return true
}
}
return false
}
type templateServiceOption func(*TemplateService)
func NewTemplateService(launcherImage string,
launcherQemuTimeout int,
virtShareDir string,
ephemeralDiskDir string,
containerDiskDir string,
hotplugDiskDir string,
imagePullSecret string,
persistentVolumeClaimCache cache.Store,
virtClient kubecli.KubevirtClient,
clusterConfig *virtconfig.ClusterConfig,
launcherSubGid int64,
exporterImage string,
resourceQuotaStore cache.Store,
namespaceStore cache.Store,
opts ...templateServiceOption,
) *TemplateService {
precond.MustNotBeEmpty(launcherImage)
log.Log.V(1).Infof("Exporter Image: %s", exporterImage)
svc := TemplateService{
launcherImage: launcherImage,
launcherQemuTimeout: launcherQemuTimeout,
virtShareDir: virtShareDir,
ephemeralDiskDir: ephemeralDiskDir,
containerDiskDir: containerDiskDir,
hotplugDiskDir: hotplugDiskDir,
imagePullSecret: imagePullSecret,
persistentVolumeClaimStore: persistentVolumeClaimCache,
virtClient: virtClient,
clusterConfig: clusterConfig,
launcherSubGid: launcherSubGid,
exporterImage: exporterImage,
resourceQuotaStore: resourceQuotaStore,
namespaceStore: namespaceStore,
}
for _, opt := range opts {
opt(&svc)
}
return &svc
}
func copyProbe(probe *v1.Probe) *k8sv1.Probe {
if probe == nil {
return nil
}
return &k8sv1.Probe{
InitialDelaySeconds: probe.InitialDelaySeconds,
TimeoutSeconds: probe.TimeoutSeconds,
PeriodSeconds: probe.PeriodSeconds,
SuccessThreshold: probe.SuccessThreshold,
FailureThreshold: probe.FailureThreshold,
ProbeHandler: k8sv1.ProbeHandler{
Exec: probe.Exec,
HTTPGet: probe.HTTPGet,
TCPSocket: probe.TCPSocket,
},
}
}
func wrapGuestAgentPingWithVirtProbe(vmi *v1.VirtualMachineInstance, probe *k8sv1.Probe) {
pingCommand := []string{
"virt-probe",
"--domainName", api.VMINamespaceKeyFunc(vmi),
"--timeoutSeconds", strconv.FormatInt(int64(probe.TimeoutSeconds), 10),
"--guestAgentPing",
}
probe.ProbeHandler.Exec = &k8sv1.ExecAction{Command: pingCommand}
// We add 1s to the pod probe timeout to compensate for the additional steps in probing
probe.TimeoutSeconds += 1
}
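// Example (illustrative): a guest-agent-ping probe with TimeoutSeconds=10 is
// rewritten into an exec probe running
//
//	virt-probe --domainName <domain key> --timeoutSeconds 10 --guestAgentPing
//
// and the pod-level TimeoutSeconds becomes 11, leaving virt-probe the full
// 10s for the guest-agent round trip.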
func alignPodMultiCategorySecurity(pod *k8sv1.Pod, selinuxType string, dockerSELinuxMCSWorkaround bool) {
if selinuxType == "" && !dockerSELinuxMCSWorkaround {
// No SELinux type and no docker workaround, nothing to do
return
}
if selinuxType != "" {
if pod.Spec.SecurityContext == nil {
pod.Spec.SecurityContext = &k8sv1.PodSecurityContext{}
}
pod.Spec.SecurityContext.SELinuxOptions = &k8sv1.SELinuxOptions{Type: selinuxType}
}
if dockerSELinuxMCSWorkaround {
// more info on https://github.com/kubernetes/kubernetes/issues/90759
// Since the compute container needs to be able to communicate with the
// rest of the pod, we loop over all the containers and remove their SELinux
// categories.
// This currently only affects Docker + SELinux use-cases, and requires a
// feature gate to be set.
for i := range pod.Spec.Containers {
container := &pod.Spec.Containers[i]
if container.Name != "compute" {
generateContainerSecurityContext(selinuxType, container)
}
}
}
}
func matchSELinuxLevelOfVMI(pod *k8sv1.Pod, vmi *v1.VirtualMachineInstance) error {
if vmi.Status.SelinuxContext == "" {
if vmi.Status.MigrationState != nil && vmi.Status.MigrationState.SourceState != nil && vmi.Status.MigrationState.SourceState.SelinuxContext != "" {
selinuxContext := vmi.Status.MigrationState.SourceState.SelinuxContext
if selinuxContext != "none" {
return setSELinuxContext(selinuxContext, pod)
}
return nil
}
return fmt.Errorf("VMI is missing SELinux context")
} else if vmi.Status.SelinuxContext != "none" {
return setSELinuxContext(vmi.Status.SelinuxContext, pod)
}
return nil
}
func setSELinuxContext(selinuxContext string, pod *k8sv1.Pod) error {
ctx := strings.Split(selinuxContext, ":")
if len(ctx) < 4 {
return fmt.Errorf("VMI has invalid SELinux context: %s", selinuxContext)
}
pod.Spec.Containers[0].SecurityContext.SELinuxOptions.Level = strings.Join(ctx[3:], ":")
return nil
}
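// Example (illustrative): for the context
// "system_u:system_r:container_t:s0:c170,c831" the first three fields are
// user, role and type; everything from the fourth field onward is rejoined
// with ":", so the level "s0:c170,c831" (including the category pair) lands
// in the first container's SELinuxOptions.Level.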
func generateContainerSecurityContext(selinuxType string, container *k8sv1.Container) {
if container.SecurityContext == nil {
container.SecurityContext = &k8sv1.SecurityContext{}
}
if container.SecurityContext.SELinuxOptions == nil {
container.SecurityContext.SELinuxOptions = &k8sv1.SELinuxOptions{}
}
container.SecurityContext.SELinuxOptions.Type = selinuxType
container.SecurityContext.SELinuxOptions.Level = "s0"
}
func (t *TemplateService) generatePodAnnotations(vmi *v1.VirtualMachineInstance) (map[string]string, error) {
annotationsSet := map[string]string{
v1.DomainAnnotation: vmi.GetObjectMeta().GetName(),
}
maps.Copy(annotationsSet, filterVMIAnnotationsForPod(vmi.Annotations))
annotationsSet[podcmd.DefaultContainerAnnotationName] = "compute"
// Set this annotation now to indicate that the newly created virt-launchers will use
// unix sockets as a transport for migration
annotationsSet[v1.MigrationTransportUnixAnnotation] = "true"
annotationsSet[descheduler.EvictOnlyAnnotation] = ""
for _, generator := range t.annotationsGenerators {
annotations, err := generator.Generate(vmi)
if err != nil {
return nil, err
}
maps.Copy(annotationsSet, annotations)
}
return annotationsSet, nil
}
func filterVMIAnnotationsForPod(vmiAnnotations map[string]string) map[string]string {
annotationsList := map[string]string{}
for k, v := range vmiAnnotations {
if strings.HasPrefix(k, "kubectl.kubernetes.io") ||
strings.HasPrefix(k, "kubevirt.io/storage-observed-api-version") ||
strings.HasPrefix(k, "kubevirt.io/latest-observed-api-version") {
continue
}
annotationsList[k] = v
}
return annotationsList
}
func checkForKeepLauncherAfterFailure(vmi *v1.VirtualMachineInstance) bool {
keepLauncherAfterFailure := false
for k, v := range vmi.Annotations {
if strings.HasPrefix(k, v1.KeepLauncherAfterFailureAnnotation) {
if v == "" || strings.HasPrefix(v, "true") {
keepLauncherAfterFailure = true
break
}
}
}
return keepLauncherAfterFailure
}
func (t *TemplateService) doesVMIRequireAutoCPULimits(vmi *v1.VirtualMachineInstance) bool {
if t.doesVMIRequireAutoResourceLimits(vmi, k8sv1.ResourceCPU) {
return true
}
labelSelector := t.clusterConfig.GetConfig().AutoCPULimitNamespaceLabelSelector
_, limitSet := vmi.Spec.Domain.Resources.Limits[k8sv1.ResourceCPU]
if labelSelector == nil || limitSet {
return false
}
selector, err := metav1.LabelSelectorAsSelector(labelSelector)
if err != nil {
log.DefaultLogger().Reason(err).Warning("invalid CPULimitNamespaceLabelSelector set, assuming none")
return false
}
if t.namespaceStore == nil {
log.DefaultLogger().Reason(err).Warning("empty namespace informer")
return false
}
obj, exists, err := t.namespaceStore.GetByKey(vmi.Namespace)
if err != nil {
log.Log.Warning("Error retrieving namespace from informer")
return false
} else if !exists {
log.Log.Warningf("namespace %s does not exist.", vmi.Namespace)
return false
}
ns, ok := obj.(*k8sv1.Namespace)
if !ok {
log.Log.Errorf("couldn't cast object to Namespace: %+v", obj)
return false
}
if selector.Matches(labels.Set(ns.Labels)) {
return true
}
return false
}
func (t *TemplateService) VMIResourcePredicates(vmi *v1.VirtualMachineInstance, networkToResourceMap map[string]string) VMIResourcePredicates {
memoryOverhead := CalculateMemoryOverhead(t.clusterConfig, t.netBindingPluginMemoryCalculator, vmi)
withCPULimits := t.doesVMIRequireAutoCPULimits(vmi)
additionalCPUs := uint32(0)
if vmi.Spec.Domain.IOThreadsPolicy != nil &&
*vmi.Spec.Domain.IOThreadsPolicy == v1.IOThreadsPolicySupplementalPool &&
vmi.Spec.Domain.IOThreads != nil &&
vmi.Spec.Domain.IOThreads.SupplementalPoolThreadCount != nil {
additionalCPUs = *vmi.Spec.Domain.IOThreads.SupplementalPoolThreadCount
}
return VMIResourcePredicates{
vmi: vmi,
resourceRules: []VMIResourceRule{
// Run overcommit first to avoid overcommitting overhead memory
NewVMIResourceRule(emptyMemoryRequest, WithMemoryRequests(vmi.Spec.Domain.Memory, t.clusterConfig.GetMemoryOvercommit())),
NewVMIResourceRule(doesVMIRequireDedicatedCPU, WithCPUPinning(vmi, vmi.Annotations, additionalCPUs)),
NewVMIResourceRule(not(doesVMIRequireDedicatedCPU), WithoutDedicatedCPU(vmi, t.clusterConfig.GetCPUAllocationRatio(), withCPULimits)),
NewVMIResourceRule(hasHugePages, WithHugePages(vmi.Spec.Domain.Memory, memoryOverhead)),
NewVMIResourceRule(not(hasHugePages), WithMemoryOverhead(vmi.Spec.Domain.Resources, memoryOverhead)),
NewVMIResourceRule(t.doesVMIRequireAutoMemoryLimits, WithAutoMemoryLimits(vmi.Namespace, t.namespaceStore)),
NewVMIResourceRule(func(*v1.VirtualMachineInstance) bool {
return len(networkToResourceMap) > 0
}, WithNetworkResources(networkToResourceMap)),
NewVMIResourceRule(isGPUVMIDevicePlugins, WithGPUsDevicePlugins(vmi.Spec.Domain.Devices.GPUs)),
NewVMIResourceRule(func(vmi *v1.VirtualMachineInstance) bool {
return t.clusterConfig.GPUsWithDRAGateEnabled() && isGPUVMIDRA(vmi)
}, WithGPUsDRA(vmi.Spec.Domain.Devices.GPUs)),
NewVMIResourceRule(isHostDevVMIDevicePlugins, WithHostDevicesDevicePlugins(vmi.Spec.Domain.Devices.HostDevices)),
NewVMIResourceRule(func(vmi *v1.VirtualMachineInstance) bool {
return t.clusterConfig.HostDevicesWithDRAEnabled() && isHostDevVMIDRA(vmi)
}, WithHostDevicesDRA(vmi.Spec.Domain.Devices.HostDevices)),
NewVMIResourceRule(util.IsSEVVMI, WithSEV()),
NewVMIResourceRule(reservation.HasVMIPersistentReservation, WithPersistentReservation()),
},
}
}
func CalculateMemoryOverhead(clusterConfig *virtconfig.ClusterConfig, netBindingPluginMemoryCalculator netBindingPluginMemoryCalculator, vmi *v1.VirtualMachineInstance) resource.Quantity {
// Default to the VMI's architecture, for compatibility with multi-architecture hybrid environments
vmiCPUArch := vmi.Spec.Architecture
if vmiCPUArch == "" {
vmiCPUArch = clusterConfig.GetClusterCPUArch()
}
memoryOverhead := GetMemoryOverhead(vmi, vmiCPUArch, clusterConfig.GetConfig().AdditionalGuestMemoryOverheadRatio)
if netBindingPluginMemoryCalculator != nil {
memoryOverhead.Add(
netBindingPluginMemoryCalculator.Calculate(vmi, clusterConfig.GetNetworkBindings()),
)
}
return memoryOverhead
}
func (t *TemplateService) doesVMIRequireAutoMemoryLimits(vmi *v1.VirtualMachineInstance) bool {
return t.doesVMIRequireAutoResourceLimits(vmi, k8sv1.ResourceMemory)
}
func (t *TemplateService) doesVMIRequireAutoResourceLimits(vmi *v1.VirtualMachineInstance, resource k8sv1.ResourceName) bool {
if _, resourceLimitsExists := vmi.Spec.Domain.Resources.Limits[resource]; resourceLimitsExists {
return false
}
for _, obj := range t.resourceQuotaStore.List() {
if resourceQuota, ok := obj.(*k8sv1.ResourceQuota); ok {
if _, exists := resourceQuota.Spec.Hard["limits."+resource]; exists && resourceQuota.Namespace == vmi.Namespace {
return true
}
}
}
return false
}
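// Example (illustrative): for resource = "memory", a VMI in namespace "ns1"
// that sets no memory limit of its own returns true as soon as any
// ResourceQuota in "ns1" defines a hard "limits.memory" entry; if the VMI
// already sets spec.domain.resources.limits.memory, the quota scan is skipped.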
func (p VMIResourcePredicates) Apply() []ResourceRendererOption {
var options []ResourceRendererOption
for _, rule := range p.resourceRules {
if rule.predicate(p.vmi) {
options = append(options, rule.option)
}
}
return options
}
func podLabels(vmi *v1.VirtualMachineInstance, hostName string) map[string]string {
labels := map[string]string{}
for k, v := range vmi.Labels {
labels[k] = v
}
labels[v1.AppLabel] = "virt-launcher"
labels[v1.CreatedByLabel] = string(vmi.UID)
labels[v1.DeprecatedVirtualMachineNameLabel] = hostName
labels[v1.VirtualMachineInstanceIDLabel] = apimachinery.CalculateVirtualMachineInstanceID(vmi.Name)
if val, exists := vmi.Annotations[istio.InjectSidecarAnnotation]; exists {
labels[istio.InjectSidecarLabel] = val
}
return labels
}
func readinessGates() []k8sv1.PodReadinessGate {
return []k8sv1.PodReadinessGate{
{
ConditionType: v1.VirtualMachineUnpaused,
},
}
}
func WithNetBindingPluginMemoryCalculator(netBindingPluginMemoryCalculator netBindingPluginMemoryCalculator) templateServiceOption {
return func(service *TemplateService) {
service.netBindingPluginMemoryCalculator = netBindingPluginMemoryCalculator
}
}
func WithAnnotationsGenerators(generators ...annotationsGenerator) templateServiceOption {
return func(service *TemplateService) {
service.annotationsGenerators = append(service.annotationsGenerators, generators...)
}
}
func WithNetTargetAnnotationsGenerator(generator targetAnnotationsGenerator) templateServiceOption {
return func(service *TemplateService) {
service.netTargetAnnotationsGenerator = generator
}
}
func hasHugePages(vmi *v1.VirtualMachineInstance) bool {
return vmi.Spec.Domain.Memory != nil && vmi.Spec.Domain.Memory.Hugepages != nil
}
// isGPUVMIDevicePlugins checks if a VMI has any GPUs configured for device plugins
func isGPUVMIDevicePlugins(vmi *v1.VirtualMachineInstance) bool {
for _, gpu := range vmi.Spec.Domain.Devices.GPUs {
if isGPUDevicePlugin(gpu) {
return true
}
}
return false
}
func isGPUDevicePlugin(gpu v1.GPU) bool {
return gpu.DeviceName != "" && gpu.ClaimRequest == nil
}
// isGPUVMIDRA checks if a VMI has any GPUs configured for Dynamic Resource Allocation
func isGPUVMIDRA(vmi *v1.VirtualMachineInstance) bool {
for _, gpu := range vmi.Spec.Domain.Devices.GPUs {
if drautil.IsGPUDRA(gpu) {
return true
}
}
return false
}
// isHostDevVMIDevicePlugins checks if a VMI has any HostDevices configured for device plugins
func isHostDevVMIDevicePlugins(vmi *v1.VirtualMachineInstance) bool {
if vmi.Spec.Domain.Devices.HostDevices == nil {
return false
}
for _, hostDev := range vmi.Spec.Domain.Devices.HostDevices {
if hostDev.DeviceName != "" && hostDev.ClaimRequest == nil {
return true
}
}
return false
}
// isHostDevVMIDRA checks if a VMI has any HostDevices configured for Dynamic Resource Allocation
func isHostDevVMIDRA(vmi *v1.VirtualMachineInstance) bool {
if vmi.Spec.Domain.Devices.HostDevices == nil {
return false
}
for _, hostDev := range vmi.Spec.Domain.Devices.HostDevices {
if hostDev.DeviceName == "" && hostDev.ClaimRequest != nil {
return true
}
}
return false
}
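// Summary of the device-assignment split implemented by the predicates above
// (illustrative; for GPUs the DRA side is delegated to drautil.IsGPUDRA):
//
//	DeviceName set,   ClaimRequest nil -> served by a device plugin
//	DeviceName empty, ClaimRequest set -> served by Dynamic Resource Allocation
//
// A VMI may mix both styles across its GPUs/HostDevices, in which case both
// resource rules apply.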
func emptyMemoryRequest(vmi *v1.VirtualMachineInstance) bool {
resources := &vmi.Spec.Domain.Resources
return resources.Requests.Memory().IsZero()
}
package services
import (
"fmt"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/config"
"kubevirt.io/kubevirt/pkg/pointer"
"kubevirt.io/kubevirt/pkg/util"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/pkg/virtiofs"
)
func generateVirtioFSContainers(vmi *v1.VirtualMachineInstance, image string, config *virtconfig.ClusterConfig) []k8sv1.Container {
passthroughFSVolumes := make(map[string]struct{})
for i := range vmi.Spec.Domain.Devices.Filesystems {
passthroughFSVolumes[vmi.Spec.Domain.Devices.Filesystems[i].Name] = struct{}{}
}
if len(passthroughFSVolumes) == 0 {
return nil
}
containers := []k8sv1.Container{}
for _, volume := range vmi.Spec.Volumes {
if _, isPassthroughFSVolume := passthroughFSVolumes[volume.Name]; isPassthroughFSVolume {
resources := resourcesForVirtioFSContainer(vmi.IsCPUDedicated(), vmi.IsCPUDedicated() || vmi.WantsToHaveQOSGuaranteed(), config)
container := generateContainerFromVolume(&volume, image, resources)
containers = append(containers, container)
}
}
return containers
}
func resourcesForVirtioFSContainer(dedicatedCPUs bool, guaranteedQOS bool, config *virtconfig.ClusterConfig) k8sv1.ResourceRequirements {
resources := k8sv1.ResourceRequirements{Requests: k8sv1.ResourceList{}, Limits: k8sv1.ResourceList{}}
resources.Requests[k8sv1.ResourceCPU] = resource.MustParse("10m")
if reqCpu := config.GetSupportContainerRequest(v1.VirtioFS, k8sv1.ResourceCPU); reqCpu != nil {
resources.Requests[k8sv1.ResourceCPU] = *reqCpu
}
resources.Limits[k8sv1.ResourceMemory] = resource.MustParse("80M")
if limMem := config.GetSupportContainerLimit(v1.VirtioFS, k8sv1.ResourceMemory); limMem != nil {
resources.Limits[k8sv1.ResourceMemory] = *limMem
}
resources.Limits[k8sv1.ResourceCPU] = resource.MustParse("100m")
if limCpu := config.GetSupportContainerLimit(v1.VirtioFS, k8sv1.ResourceCPU); limCpu != nil {
resources.Limits[k8sv1.ResourceCPU] = *limCpu
}
if dedicatedCPUs || guaranteedQOS {
resources.Requests[k8sv1.ResourceCPU] = resources.Limits[k8sv1.ResourceCPU]
}
if guaranteedQOS {
resources.Requests[k8sv1.ResourceMemory] = resources.Limits[k8sv1.ResourceMemory]
} else {
resources.Requests[k8sv1.ResourceMemory] = resource.MustParse("1M")
if reqMem := config.GetSupportContainerRequest(v1.VirtioFS, k8sv1.ResourceMemory); reqMem != nil {
resources.Requests[k8sv1.ResourceMemory] = *reqMem
}
}
return resources
}
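// Defaults rendered by the function above, before any support-container
// overrides from the cluster config (illustrative): requests cpu 10m /
// memory 1M, limits cpu 100m / memory 80M. With dedicated CPUs or guaranteed
// QoS the CPU request is raised to the CPU limit, and with guaranteed QoS the
// memory request is raised to the memory limit so the container can satisfy
// the Guaranteed QoS class.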
func isAutoMount(volume *v1.Volume) bool {
// The template service sets pod.Spec.AutomountServiceAccountToken to true when a ServiceAccount volume is present
return volume.ServiceAccount != nil
}
func virtioFSMountPoint(volume *v1.Volume) string {
volumeMountPoint := fmt.Sprintf("/%s", volume.Name)
if volume.ConfigMap != nil {
volumeMountPoint = config.GetConfigMapSourcePath(volume.Name)
} else if volume.Secret != nil {
volumeMountPoint = config.GetSecretSourcePath(volume.Name)
} else if volume.ServiceAccount != nil {
volumeMountPoint = config.ServiceAccountSourceDir
} else if volume.DownwardAPI != nil {
volumeMountPoint = config.GetDownwardAPISourcePath(volume.Name)
}
return volumeMountPoint
}
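// Examples (illustrative): a plain PVC volume named "data" is shared from
// "/data"; a ConfigMap volume resolves to config.GetConfigMapSourcePath("data"),
// a Secret to config.GetSecretSourcePath("data"), a ServiceAccount to
// config.ServiceAccountSourceDir, and a DownwardAPI volume to
// config.GetDownwardAPISourcePath("data").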
func generateContainerFromVolume(volume *v1.Volume, image string, resources k8sv1.ResourceRequirements) k8sv1.Container {
socketPathArg := fmt.Sprintf("--socket-path=%s", virtiofs.VirtioFSSocketPath(volume.Name))
sourceArg := fmt.Sprintf("--shared-dir=%s", virtioFSMountPoint(volume))
args := []string{socketPathArg, sourceArg, "--sandbox=none", "--cache=auto"}
// If some files cannot be migrated, allow the migration to finish anyway.
// Such files are marked as invalid; the guest will not be able to access
// them and will receive only errors.
args = append(args, "--migration-on-error=guest-error")
// This mode looks up file reference paths by reading the symlinks in /proc/self/fd,
// falling back to iterating through the shared directory (an exhaustive search)
// to find those paths. This migration mode doesn't require any privileges.
args = append(args, "--migration-mode=find-paths")
volumeMounts := []k8sv1.VolumeMount{
// This is required to pass the virtiofsd socket to the compute container
{
Name: virtiofs.VirtioFSContainers,
MountPath: virtiofs.VirtioFSContainersMountBaseDir,
},
}
if !isAutoMount(volume) {
volumeMounts = append(volumeMounts, k8sv1.VolumeMount{
Name: volume.Name,
MountPath: virtioFSMountPoint(volume),
})
}
return k8sv1.Container{
Name: fmt.Sprintf("virtiofs-%s", volume.Name),
Image: image,
ImagePullPolicy: k8sv1.PullIfNotPresent,
Command: []string{"/usr/libexec/virtiofsd"},
Args: args,
VolumeMounts: volumeMounts,
Resources: resources,
SecurityContext: &k8sv1.SecurityContext{
RunAsUser: pointer.P(int64(util.NonRootUID)),
RunAsGroup: pointer.P(int64(util.NonRootUID)),
RunAsNonRoot: pointer.P(true),
AllowPrivilegeEscalation: pointer.P(false),
Capabilities: &k8sv1.Capabilities{
Drop: []k8sv1.Capability{
"ALL",
},
},
},
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package clone
import (
"context"
"errors"
"fmt"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
clone "kubevirt.io/api/clone/v1beta1"
k6tv1 "kubevirt.io/api/core/v1"
snapshotv1 "kubevirt.io/api/snapshot/v1beta1"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/pointer"
backendstorage "kubevirt.io/kubevirt/pkg/storage/backend-storage"
virtsnapshot "kubevirt.io/kubevirt/pkg/storage/snapshot"
)
type cloneSourceType string
const (
sourceTypeVM cloneSourceType = "VirtualMachine"
sourceTypeSnapshot cloneSourceType = "VirtualMachineSnapshot"
)
type cloneTargetType string
const (
targetTypeVM cloneTargetType = "VirtualMachine"
defaultType cloneTargetType = targetTypeVM
)
type syncInfoType struct {
err error
snapshotName string
snapshotReady bool
restoreName string
restoreReady bool
targetVMName string
targetVMCreated bool
pvcBound bool
event Event
reason string
isCloneFailing bool
isClonePending bool
}
// vmCloneInfo stores the current vmclone information
type vmCloneInfo struct {
vmClone *clone.VirtualMachineClone
sourceType cloneSourceType
snapshot *snapshotv1.VirtualMachineSnapshot
snapshotName string
sourceVm *k6tv1.VirtualMachine
}
func (ctrl *VMCloneController) execute(key string) error {
logger := log.Log
obj, cloneExists, err := ctrl.vmCloneIndexer.GetByKey(key)
if err != nil {
return err
}
var vmClone *clone.VirtualMachineClone
if cloneExists {
vmClone = obj.(*clone.VirtualMachineClone)
logger = logger.Object(vmClone)
} else {
return nil
}
if vmClone.Status.Phase == clone.Succeeded {
_, vmExists, err := ctrl.vmStore.GetByKey(fmt.Sprintf("%s/%s", vmClone.Namespace, *vmClone.Status.TargetName))
if err != nil {
return err
}
if !vmExists {
if vmClone.DeletionTimestamp == nil {
logger.V(3).Infof("Deleting vm clone for deleted vm %s/%s", vmClone.Namespace, *vmClone.Status.TargetName)
return ctrl.client.VirtualMachineClone(vmClone.Namespace).Delete(context.Background(), vmClone.Name, v1.DeleteOptions{})
}
// nothing to process for a vm clone that's being deleted
return nil
}
}
syncInfo, err := ctrl.sync(vmClone)
if err != nil {
return fmt.Errorf("sync error: %v", err)
}
err = ctrl.updateStatus(vmClone, syncInfo)
if err != nil {
return fmt.Errorf("error updating status: %v", err)
}
if syncErr := syncInfo.err; syncErr != nil {
return fmt.Errorf("sync error: %v", syncErr)
}
return nil
}
func (ctrl *VMCloneController) sync(vmClone *clone.VirtualMachineClone) (syncInfoType, error) {
cloneInfo, err := ctrl.retrieveCloneInfo(vmClone)
if err != nil {
switch errors.Unwrap(err) {
case ErrSourceDoesntExist:
// If the source does not exist, we wait for it to be created;
// the vmclone will then get reconciled again.
return syncInfoType{
isClonePending: true,
event: SourceDoesNotExist,
reason: err.Error(),
}, nil
case ErrSourceWithBackendStorage:
return syncInfoType{
isCloneFailing: true,
event: SourceWithBackendStorageInvalid,
reason: err.Error(),
}, nil
default:
return syncInfoType{}, err
}
}
if ctrl.getTargetType(cloneInfo.vmClone) == targetTypeVM {
return ctrl.syncTargetVM(cloneInfo), nil
}
return syncInfoType{err: fmt.Errorf("target type is unknown: %s", ctrl.getTargetType(cloneInfo.vmClone))}, nil
}
// retrieveCloneInfo initializes all the snapshot and restore information that can be populated from the vm clone resource
func (ctrl *VMCloneController) retrieveCloneInfo(vmClone *clone.VirtualMachineClone) (*vmCloneInfo, error) {
sourceInfo := vmClone.Spec.Source
cloneInfo := vmCloneInfo{
vmClone: vmClone,
sourceType: cloneSourceType(sourceInfo.Kind),
}
switch cloneSourceType(sourceInfo.Kind) {
case sourceTypeVM:
sourceVMObj, err := ctrl.getSource(vmClone, sourceInfo.Name, vmClone.Namespace, string(sourceTypeVM), ctrl.vmStore)
if err != nil {
return nil, err
}
sourceVM := sourceVMObj.(*k6tv1.VirtualMachine)
if backendstorage.IsBackendStorageNeeded(sourceVM) {
return nil, fmt.Errorf("%w: VM %s/%s", ErrSourceWithBackendStorage, vmClone.Namespace, sourceInfo.Name)
}
cloneInfo.sourceVm = sourceVM
case sourceTypeSnapshot:
sourceSnapshotObj, err := ctrl.getSource(vmClone, sourceInfo.Name, vmClone.Namespace, string(sourceTypeSnapshot), ctrl.snapshotStore)
if err != nil {
return nil, err
}
sourceSnapshot := sourceSnapshotObj.(*snapshotv1.VirtualMachineSnapshot)
cloneInfo.snapshot = sourceSnapshot
cloneInfo.snapshotName = sourceSnapshot.Name
default:
return nil, fmt.Errorf("clone %s is defined with an unknown source type %s", vmClone.Name, sourceInfo.Kind)
}
if cloneInfo.snapshotName == "" && vmClone.Status.SnapshotName != nil {
cloneInfo.snapshotName = *vmClone.Status.SnapshotName
}
return &cloneInfo, nil
}
func (ctrl *VMCloneController) syncTargetVM(vmCloneInfo *vmCloneInfo) syncInfoType {
vmClone := vmCloneInfo.vmClone
syncInfo := syncInfoType{}
switch vmClone.Status.Phase {
case clone.PhaseUnset, clone.SnapshotInProgress:
if vmCloneInfo.sourceType == sourceTypeVM {
if vmClone.Status.SnapshotName == nil {
syncInfo = ctrl.createSnapshotFromVm(vmClone, vmCloneInfo.sourceVm, syncInfo)
return syncInfo
}
}
vmCloneInfo.snapshot, syncInfo = ctrl.verifySnapshotReady(vmClone, vmCloneInfo.snapshotName, vmCloneInfo.vmClone.Namespace, syncInfo)
if syncInfo.isFailingOrError() || !syncInfo.snapshotReady {
return syncInfo
}
fallthrough
case clone.RestoreInProgress:
// Here we have to know the snapshot name
if vmCloneInfo.snapshot == nil {
vmCloneInfo.snapshot, syncInfo = ctrl.getSnapshot(vmCloneInfo.snapshotName, vmCloneInfo.vmClone.Namespace, syncInfo)
if syncInfo.isFailingOrError() {
return syncInfo
}
}
if vmClone.Status.RestoreName == nil {
vm, err := ctrl.getVmFromSnapshot(vmCloneInfo.snapshot)
if err != nil {
syncInfo.setError(fmt.Errorf("cannot get VM manifest from snapshot: %v", err))
return syncInfo
}
syncInfo = ctrl.createRestoreFromVm(vmClone, vm, vmCloneInfo.snapshotName, syncInfo)
return syncInfo
}
syncInfo = ctrl.verifyRestoreReady(vmClone, vmCloneInfo.vmClone.Namespace, syncInfo)
if syncInfo.isFailingOrError() || !syncInfo.restoreReady {
return syncInfo
}
fallthrough
case clone.CreatingTargetVM:
syncInfo = ctrl.verifyVmReady(vmClone, syncInfo)
if syncInfo.isFailingOrError() {
return syncInfo
}
fallthrough
case clone.Succeeded:
if vmClone.Status.RestoreName != nil {
syncInfo = ctrl.verifyPVCBound(vmClone, syncInfo)
if syncInfo.isFailingOrError() || !syncInfo.pvcBound {
return syncInfo
}
syncInfo = ctrl.cleanupRestore(vmClone, syncInfo)
if syncInfo.isFailingOrError() {
return syncInfo
}
if vmCloneInfo.sourceType == sourceTypeVM {
syncInfo = ctrl.cleanupSnapshot(vmClone, syncInfo)
if syncInfo.isFailingOrError() {
return syncInfo
}
}
}
default:
log.Log.Object(vmClone).Infof("clone %s is in phase %s - nothing to do", vmClone.Name, string(vmClone.Status.Phase))
}
return syncInfo
}
func (ctrl *VMCloneController) updateStatus(origClone *clone.VirtualMachineClone, syncInfo syncInfoType) error {
vmClone := origClone.DeepCopy()
var phaseChanged bool
assignPhase := func(phase clone.VirtualMachineClonePhase) {
vmClone.Status.Phase = phase
phaseChanged = true
}
switch {
case syncInfo.isClonePending:
ctrl.logAndRecord(vmClone, syncInfo.event, syncInfo.reason)
updateCloneConditions(vmClone,
newProgressingCondition(corev1.ConditionFalse, "Pending"),
newReadyCondition(corev1.ConditionFalse, syncInfo.reason),
)
case syncInfo.isCloneFailing:
ctrl.logAndRecord(vmClone, syncInfo.event, syncInfo.reason)
assignPhase(clone.Failed)
updateCloneConditions(vmClone,
newProgressingCondition(corev1.ConditionFalse, "Failed"),
newReadyCondition(corev1.ConditionFalse, syncInfo.reason),
)
default:
updateCloneConditions(vmClone,
newProgressingCondition(corev1.ConditionTrue, "Still processing"),
newReadyCondition(corev1.ConditionFalse, "Still processing"),
)
}
if isInPhase(vmClone, clone.PhaseUnset) && !syncInfo.isClonePending {
assignPhase(clone.SnapshotInProgress)
}
if isInPhase(vmClone, clone.SnapshotInProgress) {
if snapshotName := syncInfo.snapshotName; snapshotName != "" {
vmClone.Status.SnapshotName = pointer.P(snapshotName)
}
if syncInfo.snapshotReady {
assignPhase(clone.RestoreInProgress)
}
}
if isInPhase(vmClone, clone.RestoreInProgress) {
if restoreName := syncInfo.restoreName; restoreName != "" {
vmClone.Status.RestoreName = pointer.P(restoreName)
}
if syncInfo.restoreReady {
assignPhase(clone.CreatingTargetVM)
}
}
if isInPhase(vmClone, clone.CreatingTargetVM) {
if targetVMName := syncInfo.targetVMName; targetVMName != "" {
vmClone.Status.TargetName = pointer.P(targetVMName)
}
if syncInfo.targetVMCreated {
assignPhase(clone.Succeeded)
}
}
if isInPhase(vmClone, clone.Succeeded) {
updateCloneConditions(vmClone,
newProgressingCondition(corev1.ConditionFalse, "Ready"),
newReadyCondition(corev1.ConditionTrue, "Ready"),
)
}
if syncInfo.pvcBound {
vmClone.Status.SnapshotName = nil
vmClone.Status.RestoreName = nil
}
if !equality.Semantic.DeepEqual(vmClone.Status, origClone.Status) {
if phaseChanged {
log.Log.Object(vmClone).Infof("Changing phase to %s", vmClone.Status.Phase)
}
_, err := ctrl.client.VirtualMachineClone(vmClone.Namespace).UpdateStatus(context.Background(), vmClone, v1.UpdateOptions{})
if err != nil {
return err
}
}
return nil
}
func validateVolumeSnapshotStatus(vm *k6tv1.VirtualMachine) error {
var vssErr error
for _, v := range vm.Spec.Template.Spec.Volumes {
if v.PersistentVolumeClaim != nil || v.DataVolume != nil {
found := false
for _, vss := range vm.Status.VolumeSnapshotStatuses {
if v.Name == vss.Name {
if !vss.Enabled {
vssErr = errors.Join(vssErr, fmt.Errorf(ErrVolumeNotSnapshotable, v.Name))
}
found = true
break
}
}
if !found {
vssErr = errors.Join(vssErr, fmt.Errorf(ErrVolumeSnapshotSupportUnknown, v.Name))
}
}
}
return vssErr
}
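// A minimal sketch (illustrative, not part of the original controller) of how
// the joined error above reads for a VM with two problematic volumes:
// errors.Join concatenates the individual messages with newlines and returns
// nil when nothing was joined. The volume names "rootdisk" and "datadisk" are
// made up for the example.
func exampleJoinedVolumeErrors() error {
var vssErr error
vssErr = errors.Join(vssErr, fmt.Errorf(ErrVolumeNotSnapshotable, "rootdisk"))
vssErr = errors.Join(vssErr, fmt.Errorf(ErrVolumeSnapshotSupportUnknown, "datadisk"))
// vssErr.Error() == "Virtual Machine volume rootdisk does not support snapshots\n" +
// "Virtual Machine volume datadisk snapshot support unknown"
return vssErr
}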
func (ctrl *VMCloneController) createSnapshotFromVm(vmClone *clone.VirtualMachineClone, vm *k6tv1.VirtualMachine, syncInfo syncInfoType) syncInfoType {
err := validateVolumeSnapshotStatus(vm)
if err != nil {
return syncInfoType{
isClonePending: true,
event: VMVolumeSnapshotsInvalid,
reason: err.Error(),
}
}
snapshot := generateSnapshot(vmClone, vm)
log.Log.Object(vmClone).Infof("creating snapshot %s for clone %s", snapshot.Name, vmClone.Name)
createdSnapshot, err := ctrl.client.VirtualMachineSnapshot(snapshot.Namespace).Create(context.Background(), snapshot, v1.CreateOptions{})
if err != nil {
if !k8serrors.IsAlreadyExists(err) {
syncInfo.setError(fmt.Errorf("failed creating snapshot %s for clone %s: %v", snapshot.Name, vmClone.Name, err))
return syncInfo
}
syncInfo.snapshotName = snapshot.Name
return syncInfo
}
snapshot = createdSnapshot
ctrl.logAndRecord(vmClone, SnapshotCreated, fmt.Sprintf("created snapshot %s for clone %s", snapshot.Name, vmClone.Name))
syncInfo.snapshotName = snapshot.Name
log.Log.Object(vmClone).V(defaultVerbosityLevel).Infof("snapshot %s was just created, reenqueuing to let snapshot time to finish", snapshot.Name)
return syncInfo
}
func (ctrl *VMCloneController) verifySnapshotReady(vmClone *clone.VirtualMachineClone, name, namespace string, syncInfo syncInfoType) (*snapshotv1.VirtualMachineSnapshot, syncInfoType) {
obj, exists, err := ctrl.snapshotStore.GetByKey(getKey(name, namespace))
if err != nil {
syncInfo.setError(fmt.Errorf("error getting snapshot %s from cache for clone %s: %v", name, vmClone.Name, err))
return nil, syncInfo
} else if !exists {
syncInfo.setError(fmt.Errorf("snapshot %s is not created yet for clone %s", name, vmClone.Name))
return nil, syncInfo
}
snapshot := obj.(*snapshotv1.VirtualMachineSnapshot)
log.Log.Object(vmClone).Infof("found snapshot %s for clone %s", snapshot.Name, vmClone.Name)
if !virtsnapshot.VmSnapshotReady(snapshot) {
log.Log.Object(vmClone).V(defaultVerbosityLevel).Infof("snapshot %s for clone %s is not ready to use yet", snapshot.Name, vmClone.Name)
return snapshot, syncInfo
}
if err := ctrl.verifySnapshotContent(snapshot); err != nil {
// At this point the snapshot has already succeeded and is ready.
// If there is an issue with the snapshot content, something is not right
// and the clone should fail.
syncInfo.isCloneFailing = true
syncInfo.event = SnapshotContentInvalid
syncInfo.reason = err.Error()
return nil, syncInfo
}
ctrl.logAndRecord(vmClone, SnapshotReady, fmt.Sprintf("snapshot %s for clone %s is ready to use", snapshot.Name, vmClone.Name))
syncInfo.snapshotReady = true
return snapshot, syncInfo
}
func (ctrl *VMCloneController) getSnapshotContent(snapshot *snapshotv1.VirtualMachineSnapshot) (*snapshotv1.VirtualMachineSnapshotContent, error) {
contentName := virtsnapshot.GetVMSnapshotContentName(snapshot)
contentKey := getKey(contentName, snapshot.Namespace)
contentObj, exists, err := ctrl.snapshotContentStore.GetByKey(contentKey)
if err != nil {
return nil, err
} else if !exists {
return nil, fmt.Errorf("snapshot content %s in namespace %s does not exist", contentName, snapshot.Namespace)
}
return contentObj.(*snapshotv1.VirtualMachineSnapshotContent), nil
}
func (ctrl *VMCloneController) verifySnapshotContent(snapshot *snapshotv1.VirtualMachineSnapshot) error {
content, err := ctrl.getSnapshotContent(snapshot)
if err != nil {
return err
}
if content.Spec.VirtualMachineSnapshotName == nil {
return fmt.Errorf("cannot get snapshot name from content %s", content.Name)
}
snapshotName := *content.Spec.VirtualMachineSnapshotName
vm := content.Spec.Source.VirtualMachine
if vm.Spec.Template == nil {
return nil
}
if backendstorage.IsBackendStorageNeeded(vm) {
return fmt.Errorf("%w: snapshot %s/%s", ErrSourceWithBackendStorage, snapshot.Namespace, snapshot.Name)
}
var volumesNotBackedUpErr error
for _, volume := range vm.Spec.Template.Spec.Volumes {
if volume.PersistentVolumeClaim == nil && volume.DataVolume == nil {
continue
}
foundBackup := false
for _, volumeBackup := range content.Spec.VolumeBackups {
if volume.Name == volumeBackup.VolumeName {
foundBackup = true
break
}
}
if !foundBackup {
volumesNotBackedUpErr = errors.Join(volumesNotBackedUpErr, fmt.Errorf(ErrVolumeNotBackedUp, volume.Name, snapshotName))
}
}
return volumesNotBackedUpErr
}
// This method assumes the snapshot was already created. If it no longer exists, syncInfo is updated accordingly.
func (ctrl *VMCloneController) getSnapshot(snapshotName string, sourceNamespace string, syncInfo syncInfoType) (*snapshotv1.VirtualMachineSnapshot, syncInfoType) {
obj, exists, err := ctrl.snapshotStore.GetByKey(getKey(snapshotName, sourceNamespace))
if err != nil {
syncInfo.setError(fmt.Errorf("error getting snapshot %s from cache: %v", snapshotName, err))
return nil, syncInfo
}
if !exists {
// At this point the snapshot was already created. If it no longer exists, it was deleted for some
// reason and the clone should fail.
syncInfo.isCloneFailing = true
syncInfo.event = SnapshotDeleted
syncInfo.reason = fmt.Sprintf("snapshot %s does not exist anymore", snapshotName)
return nil, syncInfo
}
snapshot := obj.(*snapshotv1.VirtualMachineSnapshot)
return snapshot, syncInfo
}
func (ctrl *VMCloneController) createRestoreFromVm(vmClone *clone.VirtualMachineClone, vm *k6tv1.VirtualMachine, snapshotName string, syncInfo syncInfoType) syncInfoType {
patches, err := generatePatches(vm, &vmClone.Spec)
if err != nil {
retErr := fmt.Errorf("error generating patches for clone %s: %v", vmClone.Name, err)
ctrl.recorder.Event(vmClone, corev1.EventTypeWarning, string(RestoreCreationFailed), retErr.Error())
syncInfo.setError(retErr)
return syncInfo
}
restore := generateRestore(vmClone.Spec.Target, vm.Name, vmClone.Namespace, vmClone.Name, snapshotName, vmClone.UID, patches)
log.Log.Object(vmClone).Infof("creating restore %s for clone %s", restore.Name, vmClone.Name)
createdRestore, err := ctrl.client.VirtualMachineRestore(restore.Namespace).Create(context.Background(), restore, v1.CreateOptions{})
if err != nil {
if !k8serrors.IsAlreadyExists(err) {
retErr := fmt.Errorf("failed creating restore %s for clone %s: %v", restore.Name, vmClone.Name, err)
ctrl.recorder.Event(vmClone, corev1.EventTypeWarning, string(RestoreCreationFailed), retErr.Error())
syncInfo.setError(retErr)
return syncInfo
}
syncInfo.restoreName = restore.Name
return syncInfo
}
restore = createdRestore
ctrl.logAndRecord(vmClone, RestoreCreated, fmt.Sprintf("created restore %s for clone %s", restore.Name, vmClone.Name))
syncInfo.restoreName = restore.Name
log.Log.Object(vmClone).V(defaultVerbosityLevel).Infof("restore %s was just created, reenqueuing to let snapshot time to finish", restore.Name)
return syncInfo
}
func (ctrl *VMCloneController) verifyRestoreReady(vmClone *clone.VirtualMachineClone, sourceNamespace string, syncInfo syncInfoType) syncInfoType {
obj, exists, err := ctrl.restoreStore.GetByKey(getKey(*vmClone.Status.RestoreName, sourceNamespace))
if err != nil {
syncInfo.setError(fmt.Errorf("error getting restore %s from cache for clone %s: %v", *vmClone.Status.RestoreName, vmClone.Name, err))
return syncInfo
} else if !exists {
syncInfo.setError(fmt.Errorf("restore %s is not created yet for clone %s", *vmClone.Status.RestoreName, vmClone.Name))
return syncInfo
}
restore := obj.(*snapshotv1.VirtualMachineRestore)
log.Log.Object(vmClone).Infof("found target restore %s for clone %s", restore.Name, vmClone.Name)
if virtsnapshot.VmRestoreProgressing(restore) {
log.Log.Object(vmClone).V(defaultVerbosityLevel).Infof("restore %s for clone %s is not ready to use yet", restore.Name, vmClone.Name)
return syncInfo
}
ctrl.logAndRecord(vmClone, RestoreReady, fmt.Sprintf("restore %s for clone %s is ready to use", restore.Name, vmClone.Name))
syncInfo.restoreReady = true
syncInfo.targetVMName = restore.Spec.Target.Name
return syncInfo
}
func (ctrl *VMCloneController) verifyVmReady(vmClone *clone.VirtualMachineClone, syncInfo syncInfoType) syncInfoType {
targetVMInfo := vmClone.Spec.Target
_, exists, err := ctrl.vmStore.GetByKey(getKey(targetVMInfo.Name, vmClone.Namespace))
if err != nil {
syncInfo.setError(fmt.Errorf("error getting VM %s from cache for clone %s: %v", targetVMInfo.Name, vmClone.Name, err))
return syncInfo
} else if !exists {
syncInfo.setError(fmt.Errorf("target VM %s is not created yet for clone %s", targetVMInfo.Name, vmClone.Name))
return syncInfo
}
ctrl.logAndRecord(vmClone, TargetVMCreated, fmt.Sprintf("created target VM %s for clone %s", targetVMInfo.Name, vmClone.Name))
syncInfo.targetVMCreated = true
return syncInfo
}
func (ctrl *VMCloneController) verifyPVCBound(vmClone *clone.VirtualMachineClone, syncInfo syncInfoType) syncInfoType {
obj, exists, err := ctrl.restoreStore.GetByKey(getKey(*vmClone.Status.RestoreName, vmClone.Namespace))
if err != nil {
syncInfo.setError(fmt.Errorf("error getting restore %s from cache for clone %s: %v", *vmClone.Status.RestoreName, vmClone.Name, err))
return syncInfo
} else if !exists {
syncInfo.setError(fmt.Errorf("restore %s is not created yet for clone %s", *vmClone.Status.RestoreName, vmClone.Name))
return syncInfo
}
restore := obj.(*snapshotv1.VirtualMachineRestore)
for _, volumeRestore := range restore.Status.Restores {
obj, exists, err = ctrl.pvcStore.GetByKey(getKey(volumeRestore.PersistentVolumeClaimName, vmClone.Namespace))
if err != nil {
syncInfo.setError(fmt.Errorf("error getting PVC %s from cache for clone %s: %v", volumeRestore.PersistentVolumeClaimName, vmClone.Name, err))
return syncInfo
} else if !exists {
syncInfo.setError(fmt.Errorf("PVC %s is not created yet for clone %s", volumeRestore.PersistentVolumeClaimName, vmClone.Name))
return syncInfo
}
}
pvc := obj.(*corev1.PersistentVolumeClaim)
if pvc.Status.Phase != corev1.ClaimBound {
log.Log.Object(vmClone).V(defaultVerbosityLevel).Infof("pvc %s for clone %s is not bound yet", pvc.Name, vmClone.Name)
return syncInfo
}
}
ctrl.logAndRecord(vmClone, PVCBound, fmt.Sprintf("all PVCs for clone %s are bound", vmClone.Name))
syncInfo.pvcBound = true
return syncInfo
}
func (ctrl *VMCloneController) cleanupSnapshot(vmClone *clone.VirtualMachineClone, syncInfo syncInfoType) syncInfoType {
err := ctrl.client.VirtualMachineSnapshot(vmClone.Namespace).Delete(context.Background(), *vmClone.Status.SnapshotName, v1.DeleteOptions{})
if err != nil && !k8serrors.IsNotFound(err) {
syncInfo.setError(fmt.Errorf("cannot clean up snapshot %s for clone %s", *vmClone.Status.SnapshotName, vmClone.Name))
return syncInfo
}
return syncInfo
}
func (ctrl *VMCloneController) cleanupRestore(vmClone *clone.VirtualMachineClone, syncInfo syncInfoType) syncInfoType {
err := ctrl.client.VirtualMachineRestore(vmClone.Namespace).Delete(context.Background(), *vmClone.Status.RestoreName, v1.DeleteOptions{})
if err != nil && !k8serrors.IsNotFound(err) {
syncInfo.setError(fmt.Errorf("cannot clean up restore %s for clone %s", *vmClone.Status.RestoreName, vmClone.Name))
return syncInfo
}
return syncInfo
}
func (ctrl *VMCloneController) logAndRecord(vmClone *clone.VirtualMachineClone, event Event, msg string) {
ctrl.recorder.Eventf(vmClone, corev1.EventTypeNormal, string(event), msg)
log.Log.Object(vmClone).Infof("%s", msg)
}
func (ctrl *VMCloneController) getTargetType(vmClone *clone.VirtualMachineClone) cloneTargetType {
if vmClone.Spec.Target != nil {
return cloneTargetType(vmClone.Spec.Target.Kind)
}
return defaultType
}
func (ctrl *VMCloneController) getSource(vmClone *clone.VirtualMachineClone, name, namespace, sourceKind string, store cache.Store) (interface{}, error) {
key := getKey(name, namespace)
obj, exists, err := store.GetByKey(key)
if err != nil {
return nil, fmt.Errorf("error getting %s %s in namespace %s from cache: %v", sourceKind, name, namespace, err)
}
if !exists {
return nil, fmt.Errorf("%w: %s %s/%s", ErrSourceDoesntExist, sourceKind, namespace, name)
}
return obj, nil
}
func (ctrl *VMCloneController) getVmFromSnapshot(snapshot *snapshotv1.VirtualMachineSnapshot) (*k6tv1.VirtualMachine, error) {
content, err := ctrl.getSnapshotContent(snapshot)
if err != nil {
return nil, err
}
contentVmSpec := content.Spec.Source.VirtualMachine
vm := &k6tv1.VirtualMachine{
ObjectMeta: contentVmSpec.ObjectMeta,
Spec: contentVmSpec.Spec,
Status: contentVmSpec.Status,
}
return vm, nil
}
func (s *syncInfoType) setError(err error) {
s.err = err
}
func (s *syncInfoType) isFailingOrError() bool {
return s.err != nil || s.isCloneFailing
}
package clone
import (
"errors"
"fmt"
"time"
k8scorev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
clonebase "kubevirt.io/api/clone"
clone "kubevirt.io/api/clone/v1beta1"
virtv1 "kubevirt.io/api/core/v1"
snapshotv1 "kubevirt.io/api/snapshot/v1beta1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/storage/snapshot"
)
type Event string
const (
defaultVerbosityLevel = 2
unknownTypeErrFmt = "clone controller expected object of type %s but found object of unknown type"
SnapshotCreated Event = "SnapshotCreated"
SnapshotReady Event = "SnapshotReady"
RestoreCreated Event = "RestoreCreated"
RestoreCreationFailed Event = "RestoreCreationFailed"
RestoreReady Event = "RestoreReady"
TargetVMCreated Event = "TargetVMCreated"
PVCBound Event = "PVCBound"
SnapshotDeleted Event = "SnapshotDeleted"
SnapshotContentInvalid Event = "SnapshotContentInvalid"
SourceDoesNotExist Event = "SourceDoesNotExist"
SourceWithBackendStorageInvalid Event = "SourceVMWithBackendStorageInvalid"
VMVolumeSnapshotsInvalid Event = "VMVolumeSnapshotsInvalid"
)
var (
ErrVolumeNotSnapshotable = "Virtual Machine volume %s does not support snapshots"
ErrVolumeSnapshotSupportUnknown = "Virtual Machine volume %s snapshot support unknown"
ErrVolumeNotBackedUp = "volume %s is not backed up in snapshot %s"
ErrSourceDoesntExist = errors.New("source doesn't exist")
ErrSourceWithBackendStorage = errors.New("clone of a source with backend storage is not supported")
)
type VMCloneController struct {
client kubecli.KubevirtClient
vmCloneIndexer cache.Indexer
snapshotStore cache.Store
restoreStore cache.Store
vmStore cache.Store
snapshotContentStore cache.Store
pvcStore cache.Store
recorder record.EventRecorder
vmCloneQueue workqueue.TypedRateLimitingInterface[string]
hasSynced func() bool
}
func NewVmCloneController(client kubecli.KubevirtClient, vmCloneInformer, snapshotInformer, restoreInformer, vmInformer, snapshotContentInformer, pvcInformer cache.SharedIndexInformer, recorder record.EventRecorder) (*VMCloneController, error) {
ctrl := VMCloneController{
client: client,
vmCloneIndexer: vmCloneInformer.GetIndexer(),
snapshotStore: snapshotInformer.GetStore(),
restoreStore: restoreInformer.GetStore(),
vmStore: vmInformer.GetStore(),
snapshotContentStore: snapshotContentInformer.GetStore(),
pvcStore: pvcInformer.GetStore(),
recorder: recorder,
vmCloneQueue: workqueue.NewTypedRateLimitingQueueWithConfig[string](
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-controller-vmclone"},
),
}
ctrl.hasSynced = func() bool {
return vmCloneInformer.HasSynced() && snapshotInformer.HasSynced() && restoreInformer.HasSynced() &&
vmInformer.HasSynced() && snapshotContentInformer.HasSynced() && pvcInformer.HasSynced()
}
_, err := vmCloneInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: ctrl.handleVMClone,
UpdateFunc: func(oldObj, newObj interface{}) { ctrl.handleVMClone(newObj) },
DeleteFunc: ctrl.handleVMClone,
},
)
if err != nil {
return nil, err
}
_, err = snapshotInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: ctrl.handleSnapshot,
UpdateFunc: func(oldObj, newObj interface{}) { ctrl.handleSnapshot(newObj) },
DeleteFunc: ctrl.handleSnapshot,
},
)
if err != nil {
return nil, err
}
_, err = restoreInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: ctrl.handleRestore,
UpdateFunc: func(oldObj, newObj interface{}) { ctrl.handleRestore(newObj) },
DeleteFunc: ctrl.handleRestore,
},
)
if err != nil {
return nil, err
}
_, err = pvcInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: ctrl.handlePVC,
UpdateFunc: func(oldObj, newObj interface{}) { ctrl.handlePVC(newObj) },
DeleteFunc: ctrl.handlePVC,
},
)
if err != nil {
return nil, err
}
_, err = vmInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: ctrl.handleAddedSourceVM,
UpdateFunc: func(oldObj, newObj interface{}) { ctrl.handleUpdateSourceVM(oldObj, newObj) },
DeleteFunc: ctrl.handleDeletedTargetVM,
},
)
if err != nil {
return nil, err
}
return &ctrl, nil
}
func (ctrl *VMCloneController) handleVMClone(obj interface{}) {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
vmClone, ok := obj.(*clone.VirtualMachineClone)
if !ok {
log.Log.Errorf(unknownTypeErrFmt, clonebase.ResourceVMCloneSingular)
return
}
objName, err := cache.DeletionHandlingMetaNamespaceKeyFunc(vmClone)
if err != nil {
log.Log.Errorf("vm clone controller failed to get key from object: %v, %v", err, vmClone)
return
}
log.Log.V(defaultVerbosityLevel).Infof("enqueued %q for sync", objName)
ctrl.vmCloneQueue.Add(objName)
}
func (ctrl *VMCloneController) handleSnapshot(obj interface{}) {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
snapshot, ok := obj.(*snapshotv1.VirtualMachineSnapshot)
if !ok {
log.Log.Errorf(unknownTypeErrFmt, "virtualmachinesnapshot")
return
}
if ownedByClone, key := isOwnedByClone(snapshot); ownedByClone {
ctrl.vmCloneQueue.AddRateLimited(key)
}
snapshotKey, err := cache.MetaNamespaceKeyFunc(snapshot)
if err != nil {
log.Log.Object(snapshot).Reason(err).Error("cannot get snapshot key")
return
}
snapshotSourceKeys, err := ctrl.vmCloneIndexer.IndexKeys("snapshotSource", snapshotKey)
if err != nil {
log.Log.Object(snapshot).Reason(err).Error("cannot get clone snapshotSourceKeys from snapshotSource indexer")
return
}
snapshotWaitingKeys, err := ctrl.vmCloneIndexer.IndexKeys(string(clone.SnapshotInProgress), snapshotKey)
if err != nil {
log.Log.Object(snapshot).Reason(err).Error("cannot get clone snapshotWaitingKeys from " + string(clone.SnapshotInProgress) + " indexer")
return
}
for _, key := range append(snapshotSourceKeys, snapshotWaitingKeys...) {
ctrl.vmCloneQueue.AddRateLimited(key)
}
}
func (ctrl *VMCloneController) handleRestore(obj interface{}) {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
restore, ok := obj.(*snapshotv1.VirtualMachineRestore)
if !ok {
log.Log.Errorf(unknownTypeErrFmt, "virtualmachinerestore")
return
}
if ownedByClone, key := isOwnedByClone(restore); ownedByClone {
ctrl.vmCloneQueue.AddRateLimited(key)
}
restoreKey, err := cache.MetaNamespaceKeyFunc(restore)
if err != nil {
log.Log.Object(restore).Reason(err).Error("cannot get snapshot key")
return
}
restoreWaitingKeys, err := ctrl.vmCloneIndexer.IndexKeys(string(clone.RestoreInProgress), restoreKey)
if err != nil {
log.Log.Object(restore).Reason(err).Error("cannot get clone restoreWaitingKeys from " + string(clone.RestoreInProgress) + " indexer")
return
}
for _, key := range restoreWaitingKeys {
ctrl.vmCloneQueue.AddRateLimited(key)
}
}
func (ctrl *VMCloneController) handlePVC(obj interface{}) {
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
pvc, ok := obj.(*k8scorev1.PersistentVolumeClaim)
if !ok {
log.Log.Errorf(unknownTypeErrFmt, "persistentvolumeclaim")
return
}
var (
restoreName string
exists bool
)
if restoreName, exists = pvc.Annotations[snapshot.RestoreNameAnnotation]; !exists {
return
}
if pvc.Status.Phase != k8scorev1.ClaimBound {
return
}
restoreKey := getKey(restoreName, pvc.Namespace)
succeededWaitingKeys, err := ctrl.vmCloneIndexer.IndexKeys(string(clone.Succeeded), restoreKey)
if err != nil {
log.Log.Object(pvc).Reason(err).Error("cannot get clone succeededWaitingKeys from " + string(clone.Succeeded) + " indexer")
return
}
for _, key := range succeededWaitingKeys {
ctrl.vmCloneQueue.AddRateLimited(key)
}
}
func (ctrl *VMCloneController) handleAddedSourceVM(obj interface{}) {
vm, ok := obj.(*virtv1.VirtualMachine)
if !ok {
log.Log.Reason(fmt.Errorf("unexpected obj %#v", obj)).Error("Failed to process notification")
return
}
vmKey, err := cache.MetaNamespaceKeyFunc(vm)
if err != nil {
log.Log.Object(vm).Reason(err).Error("cannot get vm key")
return
}
keys, err := ctrl.vmCloneIndexer.IndexKeys("vmSource", vmKey)
if err != nil {
log.Log.Object(vm).Reason(err).Error("cannot get clone from vmSource indexer")
return
}
for _, k := range keys {
ctrl.vmCloneQueue.Add(k)
}
}
func (ctrl *VMCloneController) handleUpdateSourceVM(oldObj, newObj interface{}) {
oldVM, ok := oldObj.(*virtv1.VirtualMachine)
if !ok {
log.Log.Reason(fmt.Errorf("unexpected old obj %#v", oldObj)).Error("Failed to process notification")
return
}
newVM, ok := newObj.(*virtv1.VirtualMachine)
if !ok {
log.Log.Reason(fmt.Errorf("unexpected new obj %#v", newObj)).Error("Failed to process notification")
return
}
// we only care about updates to the source VM's volumeSnapshotStatuses
if equality.Semantic.DeepEqual(newVM.Status.VolumeSnapshotStatuses, oldVM.Status.VolumeSnapshotStatuses) {
return
}
vmKey, err := cache.MetaNamespaceKeyFunc(newVM)
if err != nil {
log.Log.Object(newVM).Reason(err).Error("cannot get vm key")
return
}
keys, err := ctrl.vmCloneIndexer.IndexKeys("vmSource", vmKey)
if err != nil {
log.Log.Object(newVM).Reason(err).Error("cannot get clone from vmSource indexer")
return
}
for _, k := range keys {
ctrl.vmCloneQueue.Add(k)
}
}
func (ctrl *VMCloneController) handleDeletedTargetVM(obj interface{}) {
vm, ok := obj.(*virtv1.VirtualMachine)
// When a delete is dropped, the relist will notice a vm in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Log.Reason(fmt.Errorf("couldn't get object from tombstone %+v", obj)).Error("Failed to process delete notification")
return
}
vm, ok = tombstone.Obj.(*virtv1.VirtualMachine)
if !ok {
log.Log.Reason(fmt.Errorf("tombstone contained object that is not a vm %#v", obj)).Error("Failed to process delete notification")
return
}
}
vmKey, err := cache.MetaNamespaceKeyFunc(vm)
if err != nil {
log.Log.Object(vm).Reason(err).Error("cannot get vm key")
return
}
keys, err := ctrl.vmCloneIndexer.IndexKeys("vmTarget", vmKey)
if err != nil {
log.Log.Object(vm).Reason(err).Error("cannot get clone from vmTarget indexer")
return
}
for _, k := range keys {
ctrl.vmCloneQueue.Add(k)
}
}
func (ctrl *VMCloneController) Run(threadiness int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer ctrl.vmCloneQueue.ShutDown()
log.Log.Info("Starting clone controller")
defer log.Log.Info("Shutting down clone controller")
if !cache.WaitForCacheSync(
stopCh,
ctrl.hasSynced,
) {
return fmt.Errorf("failed to wait for caches to sync")
}
for i := 0; i < threadiness; i++ {
go wait.Until(ctrl.runWorker, time.Second, stopCh)
}
<-stopCh
return nil
}
func (ctrl *VMCloneController) Execute() bool {
key, quit := ctrl.vmCloneQueue.Get()
if quit {
return false
}
defer ctrl.vmCloneQueue.Done(key)
err := ctrl.execute(key)
if err != nil {
log.Log.Reason(err).Infof("reenqueuing clone %v", key)
ctrl.vmCloneQueue.AddRateLimited(key)
} else {
log.Log.V(defaultVerbosityLevel).Infof("processed clone %v", key)
ctrl.vmCloneQueue.Forget(key)
}
return true
}
func (ctrl *VMCloneController) runWorker() {
for ctrl.Execute() {
}
}
package clone
import (
"fmt"
"k8s.io/apimachinery/pkg/types"
"kubevirt.io/kubevirt/pkg/pointer"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
snapshotv1 "kubevirt.io/api/snapshot/v1beta1"
"k8s.io/apimachinery/pkg/util/rand"
clone "kubevirt.io/api/clone/v1beta1"
v1 "kubevirt.io/api/core/v1"
)
const (
vmKind = "VirtualMachine"
kubevirtApiGroup = "kubevirt.io"
)
// variable so can be overridden in tests
var currentTime = func() *metav1.Time {
t := metav1.Now()
return &t
}
func getKey(name, namespace string) string {
return fmt.Sprintf("%s/%s", namespace, name)
}
func generateNameWithRandomSuffix(names ...string) string {
const randomStringLength = 5
if len(names) == 0 {
return ""
}
generatedName := names[0]
for _, name := range names[1:] {
generatedName = fmt.Sprintf("%s-%s", generatedName, name)
}
// Kubernetes object names are limited to 253 characters.
// For more info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/
if len(generatedName) > 252 {
generatedName = "clone-object"
}
generatedName = fmt.Sprintf("%s-%s", generatedName, rand.String(randomStringLength))
return generatedName
}
func generateSnapshotName(vmCloneUID types.UID) string {
return fmt.Sprintf("tmp-snapshot-%s", string(vmCloneUID))
}
func generateRestoreName(vmCloneUID types.UID) string {
return fmt.Sprintf("tmp-restore-%s", string(vmCloneUID))
}
func generateVMName(oldVMName string) string {
return generateNameWithRandomSuffix(oldVMName, "clone")
}
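// A minimal sketch (illustrative only; the real 5-character suffix is random)
// of the name shapes produced by the helpers above.
func exampleGeneratedNames() []string {
return []string{
generateVMName("myvm"), // e.g. "myvm-clone-ab1cd"
generateSnapshotName(types.UID("1234-uid")), // "tmp-snapshot-1234-uid"
generateRestoreName(types.UID("1234-uid")), // "tmp-restore-1234-uid"
}
}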
func isInPhase(vmClone *clone.VirtualMachineClone, phase clone.VirtualMachineClonePhase) bool {
return vmClone.Status.Phase == phase
}
func generateSnapshot(vmClone *clone.VirtualMachineClone, sourceVM *v1.VirtualMachine) *snapshotv1.VirtualMachineSnapshot {
return &snapshotv1.VirtualMachineSnapshot{
ObjectMeta: metav1.ObjectMeta{
Name: generateSnapshotName(vmClone.UID),
Namespace: sourceVM.Namespace,
OwnerReferences: []metav1.OwnerReference{
getCloneOwnerReference(vmClone.Name, vmClone.UID),
},
},
Spec: snapshotv1.VirtualMachineSnapshotSpec{
Source: corev1.TypedLocalObjectReference{
Kind: vmKind,
Name: sourceVM.Name,
APIGroup: pointer.P(kubevirtApiGroup),
},
},
}
}
func generateRestore(targetInfo *corev1.TypedLocalObjectReference, sourceVMName, namespace, cloneName, snapshotName string, cloneUID types.UID, patches []string) *snapshotv1.VirtualMachineRestore {
targetInfo = targetInfo.DeepCopy()
if targetInfo.Name == "" {
targetInfo.Name = generateVMName(sourceVMName)
}
return &snapshotv1.VirtualMachineRestore{
ObjectMeta: metav1.ObjectMeta{
Name: generateRestoreName(cloneUID),
Namespace: namespace,
OwnerReferences: []metav1.OwnerReference{
getCloneOwnerReference(cloneName, cloneUID),
},
},
Spec: snapshotv1.VirtualMachineRestoreSpec{
Target: *targetInfo,
VirtualMachineSnapshotName: snapshotName,
Patches: patches,
},
}
}
func getCloneOwnerReference(cloneName string, cloneUID types.UID) metav1.OwnerReference {
return metav1.OwnerReference{
APIVersion: clone.VirtualMachineCloneKind.GroupVersion().String(),
Kind: clone.VirtualMachineCloneKind.Kind,
Name: cloneName,
UID: cloneUID,
Controller: pointer.P(true),
BlockOwnerDeletion: pointer.P(true),
}
}
// If the provided object is owned by a clone object, the first return parameter is true
// and the second is the key of the owning clone. Otherwise, the first return parameter is
// false and the second should be ignored.
func isOwnedByClone(obj metav1.Object) (isOwned bool, key string) {
cloneKind := clone.VirtualMachineCloneKind.Kind
cloneApiVersion := clone.VirtualMachineCloneKind.GroupVersion().String()
ownerRefs := obj.GetOwnerReferences()
for _, ownerRef := range ownerRefs {
if ownerRef.Kind != cloneKind || ownerRef.APIVersion != cloneApiVersion {
continue
}
key = getKey(ownerRef.Name, obj.GetNamespace())
return true, key
}
return false, ""
// TODO: Unit test this?
}
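// A minimal usage sketch (illustrative): this is essentially what the snapshot
// and restore event handlers do with the helper above - re-queue the owning
// clone whenever an owned object changes.
func exampleEnqueueOwningClone(ctrl *VMCloneController, snapshot *snapshotv1.VirtualMachineSnapshot) {
if ownedByClone, key := isOwnedByClone(snapshot); ownedByClone {
ctrl.vmCloneQueue.AddRateLimited(key)
}
}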
func updateCondition(conditions []clone.Condition, c clone.Condition, includeReason bool) []clone.Condition {
found := false
for i := range conditions {
if conditions[i].Type == c.Type {
if conditions[i].Status != c.Status || (includeReason && conditions[i].Reason != c.Reason) {
conditions[i] = c
}
found = true
break
}
}
if !found {
conditions = append(conditions, c)
}
return conditions
}
func updateCloneConditions(vmClone *clone.VirtualMachineClone, conditions ...clone.Condition) {
for _, cond := range conditions {
vmClone.Status.Conditions = updateCondition(vmClone.Status.Conditions, cond, true)
}
}
func newReadyCondition(status corev1.ConditionStatus, reason string) clone.Condition {
return clone.Condition{
Type: clone.ConditionReady,
Status: status,
Reason: reason,
LastTransitionTime: *currentTime(),
}
}
func newProgressingCondition(status corev1.ConditionStatus, reason string) clone.Condition {
return clone.Condition{
Type: clone.ConditionProgressing,
Status: status,
Reason: reason,
LastTransitionTime: *currentTime(),
}
}
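// A minimal sketch (illustrative; it mirrors how updateStatus composes these
// helpers) of the condition constructors: a clone that is still progressing is
// marked Progressing=True and Ready=False in one call.
func exampleMarkProgressing(vmClone *clone.VirtualMachineClone) {
updateCloneConditions(vmClone,
newProgressingCondition(corev1.ConditionTrue, "Still processing"),
newReadyCondition(corev1.ConditionFalse, "Still processing"),
)
}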
// Copyright 2025 the cncf-fuzzing authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package clone
import (
"k8s.io/client-go/util/workqueue"
clonev1beta1 "kubevirt.io/api/clone/v1beta1"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/testutils"
)
// These utilities are exposed here for the fuzzer in ./fuzz to use.
func ShutdownCtrlQueue(ctrl *VMCloneController) {
ctrl.vmCloneQueue.ShutDown()
}
func SetQueue(ctrl *VMCloneController, newQueue *testutils.MockWorkQueue[string]) {
ctrl.vmCloneQueue = newQueue
}
func AddToVmStore(ctrl *VMCloneController, vm *virtv1.VirtualMachine) {
ctrl.vmStore.Add(vm)
}
func AddTovmCloneIndexer(ctrl *VMCloneController, vmc *clonev1beta1.VirtualMachineClone) {
ctrl.vmCloneIndexer.Add(vmc)
}
func GetVmCloneQueue(ctrl *VMCloneController) workqueue.TypedRateLimitingInterface[string] {
return ctrl.vmCloneQueue
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package clone
import (
"encoding/json"
"fmt"
"regexp"
"strings"
"kubevirt.io/client-go/log"
clone "kubevirt.io/api/clone/v1beta1"
k6tv1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
)
func generatePatches(source *k6tv1.VirtualMachine, cloneSpec *clone.VirtualMachineCloneSpec) ([]string, error) {
patchSet := patch.New()
addMacAddressPatches(patchSet, source.Spec.Template.Spec.Domain.Devices.Interfaces, cloneSpec.NewMacAddresses)
addSmbiosSerialPatches(patchSet, source.Spec.Template.Spec.Domain.Firmware, cloneSpec.NewSMBiosSerial)
addRemovePatchesFromFilter(patchSet, source.Labels, cloneSpec.LabelFilters, "/metadata/labels")
addAnnotationPatches(patchSet, source.Annotations, cloneSpec.AnnotationFilters)
addRemovePatchesFromFilter(patchSet, source.Spec.Template.ObjectMeta.Labels, cloneSpec.Template.LabelFilters, "/spec/template/metadata/labels")
addRemovePatchesFromFilter(patchSet, source.Spec.Template.ObjectMeta.Annotations, cloneSpec.Template.AnnotationFilters, "/spec/template/metadata/annotations")
addFirmwareUUIDPatches(patchSet, source.Spec.Template.Spec.Domain.Firmware)
patches, err := generateStringPatchOperations(patchSet)
if err != nil {
return nil, err
}
patches = append(patches, cloneSpec.Patches...)
log.Log.V(defaultVerbosityLevel).Object(source).Infof("patches generated for vm %s clone: %v", source.Name, patches)
return patches, nil
}
func generateStringPatchOperations(set *patch.PatchSet) ([]string, error) {
var patches []string
for _, patchOp := range set.GetPatches() {
payloadBytes, err := json.Marshal(patchOp)
if err != nil {
return nil, err
}
patches = append(patches, string(payloadBytes))
}
return patches, nil
}
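// A minimal sketch (illustrative) of the payload shape produced above: each
// operation marshals to a single JSON-patch object string such as
// {"op":"replace","path":"/spec/template/spec/domain/firmware/serial","value":""}.
func examplePatchStrings() ([]string, error) {
set := patch.New(
patch.WithReplace("/spec/template/spec/domain/firmware/serial", ""),
)
return generateStringPatchOperations(set)
}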
func addMacAddressPatches(patchSet *patch.PatchSet, interfaces []k6tv1.Interface, newMacAddresses map[string]string) {
for idx, iface := range interfaces {
// If a new MAC address is not specified for the current interface, an empty MAC address is assigned.
// This is OK for clusters that have KubeMacPool enabled. For clusters without KMP, it is the user's
// responsibility to assign a new MAC address to every network interface.
newMac := newMacAddresses[iface.Name]
patchSet.AddOption(patch.WithReplace(fmt.Sprintf("/spec/template/spec/domain/devices/interfaces/%d/macAddress", idx), newMac))
}
}
func addSmbiosSerialPatches(patchSet *patch.PatchSet, firmware *k6tv1.Firmware, newSMBiosSerial *string) {
if firmware == nil {
return
}
newSerial := ""
if newSMBiosSerial != nil {
newSerial = *newSMBiosSerial
}
patchSet.AddOption(patch.WithReplace("/spec/template/spec/domain/firmware/serial", newSerial))
}
func addAnnotationPatches(patchSet *patch.PatchSet, annotations map[string]string, filters []string) {
// Some keys are needed for restore functionality.
// Deleting the item from the annotations map prevents
// a remove patch from being generated for it.
delete(annotations, "restore.kubevirt.io/lastRestoreUID")
addRemovePatchesFromFilter(patchSet, annotations, filters, "/metadata/annotations")
}
func addRemovePatchesFromFilter(patchSet *patch.PatchSet, m map[string]string, filters []string, baseJSONPath string) {
if filters == nil {
return
}
var regularFilters, negationFilters []string
for _, filter := range filters {
// a lone "*" is not a legal regular expression, so treat it as "match everything"
if filter == "*" {
regularFilters = append(regularFilters, ".*")
continue
}
if strings.HasPrefix(filter, "!") {
negationFilters = append(negationFilters, filter[1:])
} else {
regularFilters = append(regularFilters, filter)
}
}
matchRegex := func(regex, s string) (matched bool) {
var err error
matched, err = regexp.MatchString(regex, s)
if err != nil {
log.Log.Errorf("matching regex %s to string %s failed: %v", regex, s, err)
}
return matched
}
includedKeys := map[string]struct{}{}
// Negation filters take precedence, therefore regular filters are applied first
for key := range m {
for _, filter := range regularFilters {
if matchRegex(filter, key) {
includedKeys[key] = struct{}{}
}
}
for _, negationFilter := range negationFilters {
if matchRegex(negationFilter, key) {
delete(includedKeys, key)
}
}
}
// Appending removal patches
for originalKey := range m {
if _, isIncluded := includedKeys[originalKey]; !isIncluded {
patchSet.AddOption(patch.WithRemove(fmt.Sprintf("%s/%s", baseJSONPath, patch.EscapeJSONPointer(originalKey))))
}
}
}
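// A minimal sketch (illustrative; the label keys are made up) of the filter
// semantics above: "*" includes everything, "!" entries are negations that win
// over inclusions, and every key left un-included gets a remove patch.
func exampleLabelFilterPatches() *patch.PatchSet {
set := patch.New()
labels := map[string]string{
"app": "web",
"kubevirt.io/created-by": "abc",
}
// Keep everything except kubevirt.io/ keys: only "kubevirt.io/created-by"
// ends up excluded, so a single remove patch is generated for it.
addRemovePatchesFromFilter(set, labels, []string{"*", "!kubevirt.io/.*"}, "/metadata/labels")
return set
}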
func addFirmwareUUIDPatches(patchSet *patch.PatchSet, firmware *k6tv1.Firmware) {
if firmware == nil {
return
}
patchSet.AddOption(patch.WithReplace("/spec/template/spec/domain/firmware/uuid", ""))
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package common
type SyncError interface {
error
Reason() string
// RequiresRequeue indicates if the sync error should trigger a requeue, or
// if information should just be added to the sync condition and a regular controller
// wakeup will resolve the situation.
RequiresRequeue() bool
}
func NewSyncError(err error, reason string) *syncErrorImpl {
return &syncErrorImpl{err, reason}
}
type syncErrorImpl struct {
err error
reason string
}
func (e *syncErrorImpl) Error() string {
return e.err.Error()
}
func (e *syncErrorImpl) Reason() string {
return e.reason
}
func (e *syncErrorImpl) RequiresRequeue() bool {
return true
}
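// A minimal usage sketch (illustrative; the reason string is made up): a
// controller wraps a low-level failure so a condition can carry a short
// machine-readable reason while the error itself still drives a requeue.
func exampleWrapSyncError(err error) SyncError {
return NewSyncError(err, "FailedCreate")
}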
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package descheduler
import (
"context"
"fmt"
k8sv1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
)
// EvictOnlyAnnotation indicates pods whose eviction is not expected to be completed right away.
// Instead, an eviction request is expected to be intercepted by an external component which will initiate the
// eviction process for the pod.
const EvictOnlyAnnotation = "descheduler.alpha.kubernetes.io/request-evict-only"
// EvictionInProgressAnnotation indicates pods whose eviction was initiated by an external component.
const EvictionInProgressAnnotation = "descheduler.alpha.kubernetes.io/eviction-in-progress"
// EvictPodAnnotationKeyAlpha can be used to explicitly opt a pod in to eventual descheduling.
// The descheduler will only check the presence of the annotation and not its value.
const EvictPodAnnotationKeyAlpha = "descheduler.alpha.kubernetes.io/evict"
// EvictPodAnnotationKeyAlphaPreferNoEviction can be used to explicitly opt a pod out of eventual descheduling.
// The descheduler will only check the presence of the annotation and not its value.
const EvictPodAnnotationKeyAlphaPreferNoEviction = "descheduler.alpha.kubernetes.io/prefer-no-eviction"
func MarkEvictionInProgress(virtClient kubecli.KubevirtClient, sourcePod *k8sv1.Pod) (*k8sv1.Pod, error) {
if _, exists := sourcePod.GetAnnotations()[EvictionInProgressAnnotation]; exists {
return sourcePod, nil
}
patchSet := patch.New(
patch.WithAdd(fmt.Sprintf("/metadata/annotations/%s", patch.EscapeJSONPointer(EvictionInProgressAnnotation)), "true"),
)
patchBytes, err := patchSet.GeneratePayload()
if err != nil {
return nil, err
}
pod, err := virtClient.CoreV1().Pods(sourcePod.Namespace).Patch(context.Background(), sourcePod.Name, types.JSONPatchType, patchBytes, v1.PatchOptions{})
if err != nil {
log.Log.Object(sourcePod).Errorf("failed to add %s pod annotation: %v", EvictionInProgressAnnotation, err)
return nil, err
}
return pod, nil
}
func MarkEvictionCompleted(virtClient kubecli.KubevirtClient, sourcePod *k8sv1.Pod) (*k8sv1.Pod, error) {
if value, exists := sourcePod.GetAnnotations()[EvictionInProgressAnnotation]; exists {
patchSet := patch.New(
patch.WithTest(fmt.Sprintf("/metadata/annotations/%s", patch.EscapeJSONPointer(EvictionInProgressAnnotation)), value),
patch.WithRemove(fmt.Sprintf("/metadata/annotations/%s", patch.EscapeJSONPointer(EvictionInProgressAnnotation))),
)
patchBytes, err := patchSet.GeneratePayload()
if err != nil {
return nil, err
}
pod, err := virtClient.CoreV1().Pods(sourcePod.Namespace).Patch(context.Background(), sourcePod.Name, types.JSONPatchType, patchBytes, v1.PatchOptions{})
if err != nil {
log.Log.Object(sourcePod).Errorf("failed to remove %s pod annotation : %v", EvictionInProgressAnnotation, err)
return nil, err
}
return pod, nil
}
return sourcePod, nil
}
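// The test+remove pair above is a guarded delete: the patch applies only if
// the annotation still holds the value this controller read, so a concurrent
// writer makes the patch fail instead of silently discarding its update. A
// minimal sketch (illustrative) of the same pattern for an arbitrary
// annotation key:
func exampleGuardedAnnotationRemove(key, seenValue string) ([]byte, error) {
path := fmt.Sprintf("/metadata/annotations/%s", patch.EscapeJSONPointer(key))
return patch.New(
patch.WithTest(path, seenValue),
patch.WithRemove(path),
).GeneratePayload()
}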
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package dra
import (
"context"
"fmt"
"reflect"
"time"
"github.com/google/go-cmp/cmp"
k8sv1 "k8s.io/api/core/v1"
resourcev1 "k8s.io/api/resource/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/utils/trace"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/controller"
drautil "kubevirt.io/kubevirt/pkg/dra"
traceUtils "kubevirt.io/kubevirt/pkg/util/trace"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
const (
deleteNotifFailed = "Failed to process delete notification"
tombstoneGetObjectErrFmt = "couldn't get object from tombstone %+v"
indexByNodeName = "byNodeName"
PCIAddressDeviceAttributeKey = "resource.kubernetes.io/pciBusID"
MDevUUIDDeviceAttributeKey = "mDevUUID"
)
type DeviceInfo struct {
VMISpecClaimName string
VMISpecRequestName string
*v1.DeviceStatusInfo
}
type DRAStatusController struct {
clusterConfig *virtconfig.ClusterConfig
recorder record.EventRecorder
clientset kubecli.KubevirtClient
podIndexer cache.Indexer
resourceSliceIndexer cache.Indexer
vmiIndexer cache.Store
resourceClaimIndexer cache.Store
queue workqueue.TypedRateLimitingInterface[string]
hasSynced func() bool
}
func NewDRAStatusController(
clusterConfig *virtconfig.ClusterConfig,
vmiInformer,
podInformer,
resourceClaimInformer,
resourceSliceInformer cache.SharedIndexInformer,
recorder record.EventRecorder,
clientset kubecli.KubevirtClient) (*DRAStatusController, error) {
c := &DRAStatusController{
clusterConfig: clusterConfig,
recorder: recorder,
clientset: clientset,
podIndexer: podInformer.GetIndexer(),
vmiIndexer: vmiInformer.GetStore(),
resourceClaimIndexer: resourceClaimInformer.GetStore(),
resourceSliceIndexer: resourceSliceInformer.GetIndexer(),
queue: workqueue.NewTypedRateLimitingQueueWithConfig[string](
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "dra-status-controller"},
),
}
c.hasSynced = func() bool {
return vmiInformer.HasSynced() && podInformer.HasSynced() &&
resourceClaimInformer.HasSynced() && resourceSliceInformer.HasSynced()
}
_, err := vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addVirtualMachineInstance,
DeleteFunc: c.deleteVirtualMachineInstance,
UpdateFunc: c.updateVirtualMachineInstance,
})
if err != nil {
return nil, err
}
_, err = podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addPod,
DeleteFunc: c.deletePod,
UpdateFunc: c.updatePod,
})
if err != nil {
return nil, err
}
err = c.resourceSliceIndexer.AddIndexers(map[string]cache.IndexFunc{
indexByNodeName: indexResourceSliceByNodeName,
})
if err != nil {
return nil, err
}
return c, nil
}
func (c *DRAStatusController) enqueueVirtualMachine(obj interface{}) {
vmi := obj.(*v1.VirtualMachineInstance)
logger := log.Log.Object(vmi)
if vmi.Status.Phase == v1.Running {
logger.V(6).Infof("skipping enqueing vmi to dra status controller queue")
return
}
key, err := controller.KeyFunc(vmi)
if err != nil {
logger.Reason(err).Error("Failed to extract key from VirtualMachineInstance.")
return
}
c.queue.Add(key)
}
func (c *DRAStatusController) addVirtualMachineInstance(obj interface{}) {
c.enqueueVirtualMachine(obj)
}
func (c *DRAStatusController) updateVirtualMachineInstance(_, curr interface{}) {
c.enqueueVirtualMachine(curr)
}
func (c *DRAStatusController) deleteVirtualMachineInstance(obj interface{}) {
vmi, ok := obj.(*v1.VirtualMachineInstance)
// When a delete is dropped, the relist will notice a vmi in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Log.Reason(fmt.Errorf(tombstoneGetObjectErrFmt, obj)).Error(deleteNotifFailed)
return
}
vmi, ok = tombstone.Obj.(*v1.VirtualMachineInstance)
if !ok {
log.Log.Reason(fmt.Errorf("tombstone contained object that is not a vmi %#v", obj)).Error(deleteNotifFailed)
return
}
}
c.enqueueVirtualMachine(vmi)
}
func (c *DRAStatusController) addPod(obj interface{}) {
pod := obj.(*k8sv1.Pod)
if pod.DeletionTimestamp != nil {
// on a restart of the controller manager, it's possible a new pod shows up in a state that
// is already pending deletion. Prevent the pod from being a creation observation.
c.deletePod(pod)
return
}
controllerRef := metav1.GetControllerOf(pod)
vmi := c.resolveControllerRef(pod.Namespace, controllerRef)
if vmi == nil {
return
}
c.enqueueVirtualMachine(vmi)
}
func (c *DRAStatusController) deletePod(obj interface{}) {
pod, ok := obj.(*k8sv1.Pod)
// When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the pod
// changed labels the new vmi will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Log.Reason(fmt.Errorf(tombstoneGetObjectErrFmt, obj)).Error(deleteNotifFailed)
return
}
pod, ok = tombstone.Obj.(*k8sv1.Pod)
if !ok {
log.Log.Reason(fmt.Errorf("tombstone contained object that is not a pod %#v", obj)).Error(deleteNotifFailed)
return
}
}
controllerRef := metav1.GetControllerOf(pod)
vmi := c.resolveControllerRef(pod.Namespace, controllerRef)
if vmi == nil {
return
}
c.enqueueVirtualMachine(vmi)
}
func (c *DRAStatusController) updatePod(old interface{}, cur interface{}) {
curPod := cur.(*k8sv1.Pod)
oldPod := old.(*k8sv1.Pod)
if curPod.DeletionTimestamp != nil {
labelChanged := !equality.Semantic.DeepEqual(curPod.Labels, oldPod.Labels)
// having a pod marked for deletion is enough to count as a deletion expectation
c.deletePod(curPod)
if labelChanged {
// we don't need to check the oldPod.DeletionTimestamp because DeletionTimestamp cannot be unset.
c.deletePod(oldPod)
}
return
}
if curPod.Status.Phase == k8sv1.PodRunning || curPod.Status.Phase == k8sv1.PodFailed ||
curPod.Status.Phase == k8sv1.PodSucceeded {
return
}
curControllerRef := metav1.GetControllerOf(curPod)
oldControllerRef := metav1.GetControllerOf(oldPod)
controllerRefChanged := !equality.Semantic.DeepEqual(curControllerRef, oldControllerRef)
if controllerRefChanged {
// The ControllerRef was changed. Sync the old controller, if any.
if vmi := c.resolveControllerRef(oldPod.Namespace, oldControllerRef); vmi != nil {
c.enqueueVirtualMachine(vmi)
}
}
vmi := c.resolveControllerRef(curPod.Namespace, curControllerRef)
if vmi == nil {
return
}
c.enqueueVirtualMachine(vmi)
}
// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (c *DRAStatusController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *v1.VirtualMachineInstance {
if controllerRef != nil && controllerRef.Kind == "Pod" {
// This could be an attachment pod, look up the pod, and check if it is owned by a VMI.
obj, exists, err := c.podIndexer.GetByKey(namespace + "/" + controllerRef.Name)
if err != nil {
return nil
}
if !exists {
return nil
}
pod, _ := obj.(*k8sv1.Pod)
controllerRef = metav1.GetControllerOf(pod)
}
// We can't look up by UID, so look up by Name and then verify UID.
// Don't even try to look up by Name if it is nil or the wrong Kind.
if controllerRef == nil || controllerRef.Kind != v1.VirtualMachineInstanceGroupVersionKind.Kind {
return nil
}
vmi, exists, err := c.vmiIndexer.GetByKey(namespace + "/" + controllerRef.Name)
if err != nil {
return nil
}
if !exists {
return nil
}
if vmi.(*v1.VirtualMachineInstance).UID != controllerRef.UID {
// The controller we found with this Name is not the same one that the
// ControllerRef points to.
return nil
}
return vmi.(*v1.VirtualMachineInstance)
}
func (c *DRAStatusController) Run(threadiness int, stopCh <-chan struct{}) {
defer controller.HandlePanic()
defer c.queue.ShutDown()
log.Log.Info("Starting DRA Status controller")
// Wait for cache sync before we start processing
cache.WaitForCacheSync(stopCh, c.hasSynced)
// Start the actual work
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
log.Log.Info("Stopping DRA Status controller")
}
func (c *DRAStatusController) runWorker() {
for c.Execute() {
}
}
var draStatusControllerWorkQueueTracer = &traceUtils.Tracer{Threshold: time.Second}
func (c *DRAStatusController) Execute() bool {
if !c.clusterConfig.GPUsWithDRAGateEnabled() && !c.clusterConfig.HostDevicesWithDRAEnabled() {
return false
}
key, quit := c.queue.Get()
if quit {
return false
}
draStatusControllerWorkQueueTracer.StartTrace(key, "dra-status-controller VMI workqueue", trace.Field{Key: "Workqueue Key", Value: key})
defer draStatusControllerWorkQueueTracer.StopTrace(key)
defer c.queue.Done(key)
err := c.execute(key)
if err != nil {
log.Log.Reason(err).Infof("reenqueuing VirtualMachineInstance %v", key)
c.queue.AddRateLimited(key)
} else {
log.Log.V(4).Infof("processed VirtualMachineInstance %v", key)
c.queue.Forget(key)
}
return true
}
func (c *DRAStatusController) execute(key string) error {
obj, exists, err := c.vmiIndexer.GetByKey(key)
if err != nil {
return err
}
if !exists {
return nil
}
vmi := obj.(*v1.VirtualMachineInstance)
if vmi == nil {
return fmt.Errorf("nil vmi reference")
}
logger := log.Log.Object(vmi)
if vmi.DeletionTimestamp != nil {
// object is being deleted, do not process it
log.Log.Info("vmi being deleted, dra status controller skipping")
return nil
}
// Only consider pods which belong to this vmi
// excluding unfinalized migration targets from this list.
pod, err := controller.CurrentVMIPod(vmi, c.podIndexer)
if err != nil {
logger.Reason(err).Error("Failed to fetch pods for namespace from cache.")
return err
}
if pod == nil {
return fmt.Errorf("nil pod reference for vmi")
}
err = c.updateStatus(logger, vmi, pod)
if err != nil {
logger.Reason(err).Error("error updating status")
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, "VMIUpdateStatusFailedForDRADevices", "error updating status: %v", err)
return err
}
c.recorder.Eventf(vmi, k8sv1.EventTypeNormal, "VMIUpdatedForDRADevices", "updated status")
return nil
}
func (c *DRAStatusController) updateStatus(logger *log.FilteredLogger, vmi *v1.VirtualMachineInstance, pod *k8sv1.Pod) error {
key, err := controller.KeyFunc(vmi)
if err != nil {
return err
}
defer draStatusControllerWorkQueueTracer.StepTrace(key, "updateStatus", trace.Field{Key: "VMI Name", Value: vmi.Name})
if !isPodResourceClaimStatusFilled(logger, pod) {
logger.Infof("waiting for pod %s/%s resource claim status to be filled", pod.Namespace, pod.Name)
return nil
}
var (
gpuStatuses []v1.DeviceStatusInfo
hostDeviceStatuses []v1.DeviceStatusInfo
)
if c.clusterConfig.GPUsWithDRAGateEnabled() {
gpuDeviceInfo, err := getGPUDevicesFromVMISpec(vmi)
if err != nil {
return err
}
gpuStatuses, err = c.getGPUStatuses(gpuDeviceInfo, pod)
if err != nil {
return err
}
}
if c.clusterConfig.HostDevicesWithDRAEnabled() {
hostDeviceInfo, err := c.getHostDevicesFromVMISpec(vmi)
if err != nil {
return err
}
hostDeviceStatuses, err = c.getHostDeviceStatuses(hostDeviceInfo, pod)
if err != nil {
return err
}
}
newDeviceStatus := &v1.DeviceStatus{}
if gpuStatuses != nil {
newDeviceStatus.GPUStatuses = gpuStatuses
}
if hostDeviceStatuses != nil {
newDeviceStatus.HostDeviceStatuses = hostDeviceStatuses
}
allReconciled := true
if c.clusterConfig.GPUsWithDRAGateEnabled() {
allReconciled = drautil.IsAllDRAGPUsReconciled(vmi, newDeviceStatus)
}
if c.clusterConfig.HostDevicesWithDRAEnabled() {
allReconciled = allReconciled && drautil.IsAllDRAHostDevicesReconciled(vmi, newDeviceStatus)
}
if reflect.DeepEqual(vmi.Status.DeviceStatus, newDeviceStatus) && allReconciled {
logger.V(4).Infof("All enabled DRA devices are reconciled nothing more to do")
return nil
}
logger.V(4).Infof("updating VMI device status with DRA deviceattributes")
ps := patch.New(
patch.WithTest("/status/deviceStatus", vmi.Status.DeviceStatus),
patch.WithReplace("/status/deviceStatus", newDeviceStatus),
)
patchBytes, err := ps.GeneratePayload()
if err != nil {
return err
}
logger.V(4).Infof("patching vmi device status")
_, err = c.clientset.VirtualMachineInstance(vmi.Namespace).Patch(context.TODO(), vmi.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
logger.Errorf("error patching VMI: %#v, %#v", errors.ReasonForError(err), err)
return err
}
logger.V(6).Infof("patching vmi status successful")
return nil
}
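// Illustrative sketch (values elided): the test+replace pair above
// serializes to a two-element JSON Patch document, roughly:
//
//	[
//	  {"op": "test",    "path": "/status/deviceStatus", "value": {...}},
//	  {"op": "replace", "path": "/status/deviceStatus", "value": {...}}
//	]
//
// The "test" operation makes the replace conditional on the cached device
// status still matching the server copy, so a concurrent writer causes the
// patch to fail (and the key to be requeued) rather than being overwritten.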
func isPodResourceClaimStatusFilled(logger *log.FilteredLogger, pod *k8sv1.Pod) bool {
if pod.Status.ResourceClaimStatuses == nil {
return false
}
if len(pod.Spec.ResourceClaims) != len(pod.Status.ResourceClaimStatuses) {
var want, got []string
for _, status := range pod.Status.ResourceClaimStatuses {
if status.ResourceClaimName != nil {
got = append(got, status.Name)
}
}
for _, rc := range pod.Spec.ResourceClaims {
want = append(want, rc.Name)
}
logger.V(4).Infof("do not have enough resource claim statuses to proceed further, want vs got: %v",
cmp.Diff(want, got))
return false
}
logger.V(6).Infof("all the pod resource claim statuses have been filled")
return true
}
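// Illustrative sketch of the pod shape the check above waits for (all names
// here are hypothetical):
//
//	spec:
//	  resourceClaims:
//	  - name: gpu-claim
//	    resourceClaimTemplateName: gpu-claim-template
//	status:
//	  resourceClaimStatuses:
//	  - name: gpu-claim
//	    resourceClaimName: virt-launcher-vmi-gpu-claim-x7k2p
//
// Only once every entry in spec.resourceClaims has a status entry carrying a
// generated resourceClaimName does the controller proceed with the lookup.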
func getGPUDevicesFromVMISpec(vmi *v1.VirtualMachineInstance) ([]DeviceInfo, error) {
var gpuDevices []DeviceInfo
for _, gpu := range vmi.Spec.Domain.Devices.GPUs {
if !drautil.IsGPUDRA(gpu) {
continue
}
gpuDevices = append(gpuDevices, DeviceInfo{
VMISpecClaimName: *gpu.ClaimName,
VMISpecRequestName: *gpu.RequestName,
DeviceStatusInfo: &v1.DeviceStatusInfo{
Name: gpu.Name,
DeviceResourceClaimStatus: nil,
},
})
}
return gpuDevices, nil
}
func (c *DRAStatusController) getGPUStatuses(gpuInfos []DeviceInfo, pod *k8sv1.Pod) ([]v1.DeviceStatusInfo, error) {
statuses := make([]v1.DeviceStatusInfo, 0, len(gpuInfos))
for _, info := range gpuInfos {
st, err := c.getGPUStatus(info, pod)
if err != nil {
return nil, err
}
statuses = append(statuses, st)
}
return statuses, nil
}
func (c *DRAStatusController) getGPUStatus(gpuInfo DeviceInfo, pod *k8sv1.Pod) (v1.DeviceStatusInfo, error) {
gpuStatus := v1.DeviceStatusInfo{
Name: gpuInfo.Name,
DeviceResourceClaimStatus: &v1.DeviceResourceClaimStatus{
ResourceClaimName: getResourceClaimNameForDevice(gpuInfo.VMISpecClaimName, pod),
},
}
if gpuStatus.DeviceResourceClaimStatus.ResourceClaimName == nil {
return gpuStatus, nil
}
device, err := c.getAllocatedDevice(pod.Namespace, *gpuStatus.DeviceResourceClaimStatus.ResourceClaimName, gpuInfo.VMISpecRequestName)
if err != nil {
return gpuStatus, err
}
if device == nil {
return gpuStatus, nil
}
gpuStatus.DeviceResourceClaimStatus.Name = &device.Device
pciAddress, mDevUUID, err := c.getDeviceAttributes(pod.Spec.NodeName, device.Device, device.Driver)
if err != nil {
return gpuStatus, err
}
attrs := v1.DeviceAttribute{}
if pciAddress != "" {
attrs.PCIAddress = &pciAddress
}
if mDevUUID != "" {
attrs.MDevUUID = &mDevUUID
}
gpuStatus.DeviceResourceClaimStatus.Attributes = &attrs
return gpuStatus, nil
}
func getResourceClaimNameForDevice(claimName string, pod *k8sv1.Pod) *string {
for _, rc := range pod.Status.ResourceClaimStatuses {
if rc.Name == claimName {
return rc.ResourceClaimName
}
}
return nil
}
func (c *DRAStatusController) getAllocatedDevice(resourceClaimNamespace, resourceClaimName, requestName string) (*resourcev1.DeviceRequestAllocationResult, error) {
key := controller.NamespacedKey(resourceClaimNamespace, resourceClaimName)
obj, exists, err := c.resourceClaimIndexer.GetByKey(key)
if err != nil {
return nil, err
}
if !exists {
return nil, fmt.Errorf("resource claim %s does not exist", key)
}
resourceClaim := obj.(*resourcev1.ResourceClaim)
if resourceClaim.Status.Allocation == nil {
return nil, nil
}
if resourceClaim.Status.Allocation.Devices.Results == nil {
return nil, nil
}
for _, status := range resourceClaim.Status.Allocation.Devices.Results {
if status.Request == requestName {
return status.DeepCopy(), nil
}
}
return nil, nil
}
// getDeviceAttributes returns the pciAddress and mdevUUID of the device. It returns both when found, and an empty string for any attribute that is missing.
func (c *DRAStatusController) getDeviceAttributes(nodeName string, deviceName, driverName string) (string, string, error) {
resourceSlices, err := c.resourceSliceIndexer.ByIndex(indexByNodeName, nodeName)
if err != nil {
return "", "", err
}
if len(resourceSlices) == 0 {
return "", "", fmt.Errorf("no resource slice objects found in cache")
}
pciAddress := ""
mdevUUID := ""
for _, obj := range resourceSlices {
rs := obj.(*resourcev1.ResourceSlice)
if rs.Spec.Driver == driverName {
for _, device := range rs.Spec.Devices {
if device.Name == deviceName {
for key, value := range device.Attributes {
if string(key) == PCIAddressDeviceAttributeKey {
pciAddress = *value.StringValue
} else if string(key) == MDevUUIDDeviceAttributeKey {
mdevUUID = *value.StringValue
}
}
if pciAddress == "" && mdevUUID == "" {
return "", "", fmt.Errorf("neither pciAddress nor mdevUUIDa attribute found for device %s", deviceName)
}
return pciAddress, mdevUUID, nil
}
}
}
}
return pciAddress, mdevUUID, nil
}
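// Illustrative sketch (driver, device, and attribute names are hypothetical)
// of the ResourceSlice data the lookup above walks:
//
//	spec:
//	  nodeName: node-1
//	  driver: gpu.example.com
//	  devices:
//	  - name: gpu-0
//	    attributes:
//	      <PCIAddressDeviceAttributeKey>: {string: "0000:65:00.0"}
//
// The controller matches on driver and device name, then surfaces the PCI
// address and/or mediated-device UUID attributes in the VMI device status.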
func indexResourceSliceByNodeName(obj interface{}) ([]string, error) {
rs, ok := obj.(*resourcev1.ResourceSlice)
if !ok {
return nil, nil
}
if rs.Spec.NodeName == nil {
return nil, nil
}
return []string{*rs.Spec.NodeName}, nil
}
func (c *DRAStatusController) getHostDevicesFromVMISpec(vmi *v1.VirtualMachineInstance) ([]DeviceInfo, error) {
var hostDevices []DeviceInfo
for _, hostDevice := range vmi.Spec.Domain.Devices.HostDevices {
if !drautil.IsHostDeviceDRA(hostDevice) {
continue
}
hostDevices = append(hostDevices, DeviceInfo{
VMISpecClaimName: *hostDevice.ClaimRequest.ClaimName,
VMISpecRequestName: *hostDevice.ClaimRequest.RequestName,
DeviceStatusInfo: &v1.DeviceStatusInfo{
Name: hostDevice.Name,
DeviceResourceClaimStatus: nil,
},
})
}
return hostDevices, nil
}
func (c *DRAStatusController) getHostDeviceStatuses(hostDeviceInfos []DeviceInfo, pod *k8sv1.Pod) ([]v1.DeviceStatusInfo, error) {
statuses := make([]v1.DeviceStatusInfo, 0, len(hostDeviceInfos))
for _, info := range hostDeviceInfos {
st, err := c.getHostDeviceStatus(info, pod)
if err != nil {
return nil, err
}
statuses = append(statuses, st)
}
return statuses, nil
}
func (c *DRAStatusController) getHostDeviceStatus(hostDeviceInfo DeviceInfo, pod *k8sv1.Pod) (v1.DeviceStatusInfo, error) {
hostDeviceStatus := v1.DeviceStatusInfo{
Name: hostDeviceInfo.Name,
DeviceResourceClaimStatus: &v1.DeviceResourceClaimStatus{
ResourceClaimName: getResourceClaimNameForDevice(hostDeviceInfo.VMISpecClaimName, pod),
},
}
if hostDeviceStatus.DeviceResourceClaimStatus.ResourceClaimName == nil {
return hostDeviceStatus, nil
}
device, err := c.getAllocatedDevice(pod.Namespace, *hostDeviceStatus.DeviceResourceClaimStatus.ResourceClaimName, hostDeviceInfo.VMISpecRequestName)
if err != nil {
return hostDeviceStatus, err
}
if device == nil {
return hostDeviceStatus, nil
}
hostDeviceStatus.DeviceResourceClaimStatus.Name = &device.Device
pciAddress, mDevUUID, err := c.getDeviceAttributes(pod.Spec.NodeName, device.Device, device.Driver)
if err != nil {
return hostDeviceStatus, err
}
attrs := v1.DeviceAttribute{}
if pciAddress != "" {
attrs.PCIAddress = &pciAddress
}
if mDevUUID != "" {
attrs.MDevUUID = &mDevUUID
}
hostDeviceStatus.DeviceResourceClaimStatus.Attributes = &attrs
return hostDeviceStatus, nil
}
package disruptionbudget
import (
"context"
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/equality"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/controller"
"kubevirt.io/kubevirt/pkg/libvmi"
"kubevirt.io/kubevirt/pkg/util/pdbs"
)
const deleteNotifFail = "Failed to process delete notification"
const (
// FailedDeletePodDisruptionBudgetReason is added in an event if deleting a PodDisruptionBudget failed.
FailedDeletePodDisruptionBudgetReason = "FailedDelete"
// SuccessfulDeletePodDisruptionBudgetReason is added in an event if deleting a PodDisruptionBudget succeeded.
SuccessfulDeletePodDisruptionBudgetReason = "SuccessfulDelete"
)
type DisruptionBudgetController struct {
clientset kubecli.KubevirtClient
Queue workqueue.TypedRateLimitingInterface[string]
vmiStore cache.Store
pdbIndexer cache.Indexer
recorder record.EventRecorder
podDisruptionBudgetExpectations *controller.UIDTrackingControllerExpectations
hasSynced func() bool
}
func NewDisruptionBudgetController(
vmiInformer cache.SharedIndexInformer,
pdbInformer cache.SharedIndexInformer,
podInformer cache.SharedIndexInformer,
migrationInformer cache.SharedIndexInformer,
recorder record.EventRecorder,
clientset kubecli.KubevirtClient,
) (*DisruptionBudgetController, error) {
c := &DisruptionBudgetController{
Queue: workqueue.NewTypedRateLimitingQueueWithConfig[string](
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-controller-disruption-budget"},
),
vmiStore: vmiInformer.GetStore(),
pdbIndexer: pdbInformer.GetIndexer(),
recorder: recorder,
clientset: clientset,
podDisruptionBudgetExpectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
}
c.hasSynced = func() bool {
return vmiInformer.HasSynced() && pdbInformer.HasSynced() && podInformer.HasSynced() && migrationInformer.HasSynced()
}
_, err := vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addVirtualMachineInstance,
DeleteFunc: c.deleteVirtualMachineInstance,
UpdateFunc: c.updateVirtualMachineInstance,
})
if err != nil {
return nil, err
}
_, err = pdbInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addPodDisruptionBudget,
DeleteFunc: c.deletePodDisruptionBudget,
UpdateFunc: c.updatePodDisruptionBudget,
})
if err != nil {
return nil, err
}
_, err = podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: c.updatePod,
})
if err != nil {
return nil, err
}
_, err = migrationInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: c.updateMigration,
})
if err != nil {
return nil, err
}
return c, nil
}
func (c *DisruptionBudgetController) updateMigration(_, curr interface{}) {
vmim := curr.(*virtv1.VirtualMachineInstanceMigration)
if vmim.DeletionTimestamp != nil {
return
}
vmi := &virtv1.VirtualMachineInstance{
ObjectMeta: v1.ObjectMeta{
Namespace: vmim.GetNamespace(),
Name: vmim.Spec.VMIName,
},
}
c.enqueueVirtualMachine(vmi)
}
func (c *DisruptionBudgetController) updatePod(_, curr interface{}) {
pod := curr.(*corev1.Pod)
if pod.DeletionTimestamp != nil {
return
}
controllerRef := v1.GetControllerOf(pod)
if controllerRef == nil {
return
}
vmi := c.resolveControllerRef(pod.Namespace, controllerRef)
if vmi == nil {
return
}
c.enqueueVirtualMachine(vmi)
}
func (c *DisruptionBudgetController) addVirtualMachineInstance(obj interface{}) {
c.enqueueVMI(obj)
}
func (c *DisruptionBudgetController) deleteVirtualMachineInstance(obj interface{}) {
c.enqueueVMI(obj)
}
func (c *DisruptionBudgetController) updateVirtualMachineInstance(_, curr interface{}) {
c.enqueueVMI(curr)
}
func (c *DisruptionBudgetController) enqueueVMI(obj interface{}) {
logger := log.Log
vmi, ok := obj.(*virtv1.VirtualMachineInstance)
// When a delete is dropped, the relist will notice a vmi in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the vmi
// changed labels the new vmi will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Log.Reason(fmt.Errorf("couldn't get object from tombstone %+v", obj)).Error(deleteNotifFail)
return
}
vmi, ok = tombstone.Obj.(*virtv1.VirtualMachineInstance)
if !ok {
log.Log.Reason(fmt.Errorf("tombstone contained object that is not a pdb %#v", obj)).Error(deleteNotifFail)
return
}
}
key, err := controller.KeyFunc(vmi)
if err != nil {
logger.Object(vmi).Reason(err).Error("Failed to extract key from vmi.")
}
c.Queue.Add(key)
}
// When a pdb is created, enqueue the vmi that manages it and update its pdbExpectations.
func (c *DisruptionBudgetController) addPodDisruptionBudget(obj interface{}) {
pdb := obj.(*policyv1.PodDisruptionBudget)
if pdb.DeletionTimestamp != nil {
// on a restart of the controller manager, it's possible a new pdb shows up in a state that
// is already pending deletion. Prevent the pdb from being a creation observation.
c.deletePodDisruptionBudget(pdb)
return
}
controllerRef := v1.GetControllerOf(pdb)
vmi := c.resolveControllerRef(pdb.Namespace, controllerRef)
if vmi == nil {
return
}
vmiKey, err := controller.KeyFunc(vmi)
if err != nil {
return
}
log.Log.V(4).Object(pdb).Infof("PodDisruptionBudget created")
c.podDisruptionBudgetExpectations.CreationObserved(vmiKey)
c.enqueueVirtualMachine(vmi)
}
// When a pdb is updated, figure out what vmi/s manage it and wake them
// up. If the labels of the pdb have changed we need to awaken both the old
// and new vmi. old and cur must be *v1.PodDisruptionBudget types.
func (c *DisruptionBudgetController) updatePodDisruptionBudget(old, cur interface{}) {
curPodDisruptionBudget := cur.(*policyv1.PodDisruptionBudget)
oldPodDisruptionBudget := old.(*policyv1.PodDisruptionBudget)
if curPodDisruptionBudget.ResourceVersion == oldPodDisruptionBudget.ResourceVersion {
// Periodic resync will send update events for all known pdbs.
// Two different versions of the same pdb will always have different RVs.
return
}
if curPodDisruptionBudget.DeletionTimestamp != nil {
labelChanged := !equality.Semantic.DeepEqual(curPodDisruptionBudget.Labels, oldPodDisruptionBudget.Labels)
// having a pdb marked for deletion is enough to count as a deletion expectation
c.deletePodDisruptionBudget(curPodDisruptionBudget)
if labelChanged {
// we don't need to check the oldPodDisruptionBudget.DeletionTimestamp because DeletionTimestamp cannot be unset.
c.deletePodDisruptionBudget(oldPodDisruptionBudget)
}
return
}
curControllerRef := v1.GetControllerOf(curPodDisruptionBudget)
oldControllerRef := v1.GetControllerOf(oldPodDisruptionBudget)
controllerRefChanged := !equality.Semantic.DeepEqual(curControllerRef, oldControllerRef)
if controllerRefChanged {
// The ControllerRef was changed. Sync the old controller, if any.
if vmi := c.resolveControllerRef(oldPodDisruptionBudget.Namespace, oldControllerRef); vmi != nil {
c.enqueueVirtualMachine(vmi)
}
}
vmi := c.resolveControllerRef(curPodDisruptionBudget.Namespace, curControllerRef)
if vmi == nil {
return
}
log.Log.V(4).Object(curPodDisruptionBudget).Infof("PodDisruptionBudget updated")
c.enqueueVirtualMachine(vmi)
}
// When a pdb is deleted, enqueue the vmi that manages the pdb and update its pdbExpectations.
// obj could be an *v1.PodDisruptionBudget, or a DeletionFinalStateUnknown marker item.
func (c *DisruptionBudgetController) deletePodDisruptionBudget(obj interface{}) {
pdb, ok := obj.(*policyv1.PodDisruptionBudget)
// When a delete is dropped, the relist will notice a pdb in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the pdb
// changed labels the new vmi will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Log.Reason(fmt.Errorf("couldn't get object from tombstone %+v", obj)).Error(deleteNotifFail)
return
}
pdb, ok = tombstone.Obj.(*policyv1.PodDisruptionBudget)
if !ok {
log.Log.Reason(fmt.Errorf("tombstone contained object that is not a pdb %#v", obj)).Error(deleteNotifFail)
return
}
}
controllerRef := v1.GetControllerOf(pdb)
vmi := c.resolveControllerRef(pdb.Namespace, controllerRef)
if vmi == nil {
return
}
vmiKey, err := controller.KeyFunc(vmi)
if err != nil {
return
}
key, err := controller.KeyFunc(pdb)
if err != nil {
return
}
c.podDisruptionBudgetExpectations.DeletionObserved(vmiKey, key)
c.enqueueVirtualMachine(vmi)
}
func (c *DisruptionBudgetController) enqueueVirtualMachine(obj interface{}) {
logger := log.Log
vmi := obj.(*virtv1.VirtualMachineInstance)
key, err := controller.KeyFunc(vmi)
if err != nil {
logger.Object(vmi).Reason(err).Error("Failed to extract key from virtualmachineinstance.")
return
}
c.Queue.Add(key)
}
// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (c *DisruptionBudgetController) resolveControllerRef(namespace string, controllerRef *v1.OwnerReference) *virtv1.VirtualMachineInstance {
// We can't look up by UID, so look up by Name and then verify UID.
// Don't even try to look up by Name if it is nil or the wrong Kind.
if controllerRef == nil || controllerRef.Kind != virtv1.VirtualMachineInstanceGroupVersionKind.Kind {
return nil
}
return &virtv1.VirtualMachineInstance{
ObjectMeta: v1.ObjectMeta{
Name: controllerRef.Name,
Namespace: namespace,
UID: controllerRef.UID,
},
}
}
// Run runs the DisruptionBudgetController.
func (c *DisruptionBudgetController) Run(threadiness int, stopCh <-chan struct{}) {
defer controller.HandlePanic()
defer c.Queue.ShutDown()
log.Log.Info("Starting disruption budget controller.")
// Wait for the informer caches to sync before starting the workers
cache.WaitForCacheSync(stopCh, c.hasSynced)
// Start the actual work
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
log.Log.Info("Stopping disruption budget controller.")
}
func (c *DisruptionBudgetController) runWorker() {
for c.Execute() {
}
}
func (c *DisruptionBudgetController) Execute() bool {
key, quit := c.Queue.Get()
if quit {
return false
}
defer c.Queue.Done(key)
err := c.execute(key)
if err != nil {
log.Log.Reason(err).Infof("reenqueuing VirtualMachineInstance %v", key)
c.Queue.AddRateLimited(key)
} else {
log.Log.V(4).Infof("processed VirtualMachineInstance %v", key)
c.Queue.Forget(key)
}
return true
}
func (c *DisruptionBudgetController) execute(key string) error {
if !c.podDisruptionBudgetExpectations.SatisfiedExpectations(key) {
return nil
}
// Fetch the latest Vm state from cache
obj, vmiExists, err := c.vmiStore.GetByKey(key)
if err != nil {
return err
}
var vmi *virtv1.VirtualMachineInstance
// Once all finalizers are removed the vmi gets deleted and we can clean all expectations
if vmiExists {
vmi = obj.(*virtv1.VirtualMachineInstance)
} else {
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
log.DefaultLogger().Reason(err).Error("Could not extract namespace and name from the controller key.")
return err
}
vmi = libvmi.New(libvmi.WithName(name), libvmi.WithNamespace(namespace))
}
// Only consider pdbs which belong to this vmi
pdbs, err := pdbs.PDBsForVMI(vmi, c.pdbIndexer)
if err != nil {
log.DefaultLogger().Reason(err).Error("Failed to fetch pod disruption budgets for namespace from cache.")
// If the situation does not change there is no benefit in retrying
return nil
}
if len(pdbs) == 0 {
return nil
}
for i := range pdbs {
if syncErr := c.deletePDB(key, pdbs[i], vmi); syncErr != nil {
err = syncErr
}
}
return err
}
func (c *DisruptionBudgetController) deletePDB(key string, pdb *policyv1.PodDisruptionBudget, vmi *virtv1.VirtualMachineInstance) error {
if pdb != nil && pdb.DeletionTimestamp == nil {
pdbKey, err := cache.MetaNamespaceKeyFunc(pdb)
if err != nil {
return err
}
c.podDisruptionBudgetExpectations.ExpectDeletions(key, []string{pdbKey})
err = c.clientset.PolicyV1().PodDisruptionBudgets(pdb.Namespace).Delete(context.Background(), pdb.Name, v1.DeleteOptions{})
if err != nil {
c.podDisruptionBudgetExpectations.DeletionObserved(key, pdbKey)
c.recorder.Eventf(vmi, corev1.EventTypeWarning, FailedDeletePodDisruptionBudgetReason, "Error deleting the PodDisruptionBudget %s: %v", pdb.Name, err)
return err
}
c.recorder.Eventf(vmi, corev1.EventTypeNormal, SuccessfulDeletePodDisruptionBudgetReason, "Deleted PodDisruptionBudget %s", pdb.Name)
}
return nil
}
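// The expectations calls above follow the standard controller pattern (a
// sketch, using the helpers already present in this file):
//
//	c.podDisruptionBudgetExpectations.ExpectDeletions(key, []string{pdbKey}) // before the API call
//	// ... issue the delete ...
//	c.podDisruptionBudgetExpectations.DeletionObserved(key, pdbKey) // rolled back here on failure;
//	// on success it is observed by deletePodDisruptionBudget via the informer
//
// execute() skips a VMI while its expectations are unsatisfied, which keeps
// the controller from acting on a stale cache between the API call and the
// corresponding informer event.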
package evacuation
import (
"context"
"fmt"
"math"
"sync"
"time"
k8sv1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/controller"
"kubevirt.io/kubevirt/pkg/pointer"
migrationutils "kubevirt.io/kubevirt/pkg/util/migrations"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
const (
deleteNotifFail = "Failed to process delete notification"
getObjectErrFmt = "couldn't get object from tombstone %+v"
objectNotMigrationFmt = "tombstone contained object that is not a migration %#v"
)
const (
// FailedCreateVirtualMachineInstanceMigrationReason is added in an event if creating a VirtualMachineInstanceMigration failed.
FailedCreateVirtualMachineInstanceMigrationReason = "FailedCreate"
// SuccessfulCreateVirtualMachineInstanceMigrationReason is added in an event if creating a VirtualMachineInstanceMigration succeeded.
SuccessfulCreateVirtualMachineInstanceMigrationReason = "SuccessfulCreate"
)
type EvacuationController struct {
clientset kubecli.KubevirtClient
Queue workqueue.TypedRateLimitingInterface[string]
vmiIndexer cache.Indexer
vmiPodIndexer cache.Indexer
migrationIndexer cache.Indexer
recorder record.EventRecorder
migrationExpectations *controller.UIDTrackingControllerExpectations
nodeStore cache.Store
clusterConfig *virtconfig.ClusterConfig
hasSynced func() bool
}
func NewEvacuationController(
vmiInformer cache.SharedIndexInformer,
migrationInformer cache.SharedIndexInformer,
nodeInformer cache.SharedIndexInformer,
vmiPodInformer cache.SharedIndexInformer,
recorder record.EventRecorder,
clientset kubecli.KubevirtClient,
clusterConfig *virtconfig.ClusterConfig,
) (*EvacuationController, error) {
c := &EvacuationController{
Queue: workqueue.NewTypedRateLimitingQueueWithConfig[string](
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-controller-evacuation"},
),
vmiIndexer: vmiInformer.GetIndexer(),
migrationIndexer: migrationInformer.GetIndexer(),
nodeStore: nodeInformer.GetStore(),
vmiPodIndexer: vmiPodInformer.GetIndexer(),
recorder: recorder,
clientset: clientset,
migrationExpectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
clusterConfig: clusterConfig,
}
c.hasSynced = func() bool {
return vmiInformer.HasSynced() && vmiPodInformer.HasSynced() && migrationInformer.HasSynced() && nodeInformer.HasSynced()
}
_, err := vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addVirtualMachineInstance,
DeleteFunc: c.deleteVirtualMachineInstance,
UpdateFunc: c.updateVirtualMachineInstance,
})
if err != nil {
return nil, err
}
_, err = migrationInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addMigration,
DeleteFunc: c.deleteMigration,
UpdateFunc: c.updateMigration,
})
if err != nil {
return nil, err
}
_, err = nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addNode,
DeleteFunc: c.deleteNode,
UpdateFunc: c.updateNode,
})
if err != nil {
return nil, err
}
return c, nil
}
func (c *EvacuationController) addNode(obj interface{}) {
c.enqueueNode(obj)
}
func (c *EvacuationController) deleteNode(obj interface{}) {
c.enqueueNode(obj)
}
func (c *EvacuationController) updateNode(_, curr interface{}) {
c.enqueueNode(curr)
}
func (c *EvacuationController) enqueueNode(obj interface{}) {
logger := log.Log
node := obj.(*k8sv1.Node)
key, err := controller.KeyFunc(node)
if err != nil {
logger.Object(node).Reason(err).Error("Failed to extract key from node.")
return
}
c.Queue.Add(key)
}
func (c *EvacuationController) addVirtualMachineInstance(obj interface{}) {
c.enqueueVMI(obj)
}
func (c *EvacuationController) deleteVirtualMachineInstance(obj interface{}) {
c.enqueueVMI(obj)
}
func (c *EvacuationController) updateVirtualMachineInstance(_, curr interface{}) {
c.enqueueVMI(curr)
}
func (c *EvacuationController) enqueueVMI(obj interface{}) {
vmi, ok := obj.(*virtv1.VirtualMachineInstance)
// When a delete is dropped, the relist will notice a vmi in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the vmi
// changed labels the new vmi will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Log.Reason(fmt.Errorf(getObjectErrFmt, obj)).Error(deleteNotifFail)
return
}
vmi, ok = tombstone.Obj.(*virtv1.VirtualMachineInstance)
if !ok {
log.Log.Reason(fmt.Errorf("tombstone contained object that is not a vmi %#v", obj)).Error(deleteNotifFail)
return
}
}
node := c.nodeFromVMI(vmi)
if node != "" {
c.Queue.Add(node)
}
}
func (c *EvacuationController) nodeFromVMI(obj interface{}) string {
vmi, ok := obj.(*virtv1.VirtualMachineInstance)
// When a delete is dropped, the relist will notice a vmi in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the vmi
// changed labels the new vmi will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Log.Reason(fmt.Errorf(getObjectErrFmt, obj)).Error(deleteNotifFail)
return ""
}
vmi, ok = tombstone.Obj.(*virtv1.VirtualMachineInstance)
if !ok {
log.Log.Reason(fmt.Errorf("tombstone contained object that is not a vmi %#v", obj)).Error(deleteNotifFail)
return ""
}
}
return vmi.Status.NodeName
}
func (c *EvacuationController) addMigration(obj interface{}) {
migration := obj.(*virtv1.VirtualMachineInstanceMigration)
node := ""
// only observe the migration expectation if our controller created it
key, ok := migration.Annotations[virtv1.EvacuationMigrationAnnotation]
if ok {
c.migrationExpectations.CreationObserved(key)
node = key
} else {
o, exists, err := c.vmiIndexer.GetByKey(controller.NamespacedKey(migration.Namespace, migration.Spec.VMIName))
if err != nil {
return
}
if exists {
node = c.nodeFromVMI(o)
}
}
if node != "" {
c.Queue.Add(node)
}
}
func (c *EvacuationController) deleteMigration(obj interface{}) {
c.enqueueMigration(obj)
}
func (c *EvacuationController) updateMigration(_, curr interface{}) {
c.enqueueMigration(curr)
}
func (c *EvacuationController) enqueueMigration(obj interface{}) {
migration, ok := obj.(*virtv1.VirtualMachineInstanceMigration)
// When a delete is dropped, the relist will notice a migration in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the migration
// changed labels the new migration will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Log.Reason(fmt.Errorf(getObjectErrFmt, obj)).Error(deleteNotifFail)
return
}
migration, ok = tombstone.Obj.(*virtv1.VirtualMachineInstanceMigration)
if !ok {
log.Log.Reason(fmt.Errorf(objectNotMigrationFmt, obj)).Error(deleteNotifFail)
return
}
}
o, exists, err := c.vmiIndexer.GetByKey(controller.NamespacedKey(migration.Namespace, migration.Spec.VMIName))
if err != nil {
return
}
if exists {
c.enqueueVMI(o)
}
}
func (c *EvacuationController) enqueueVirtualMachine(obj interface{}) {
logger := log.Log
vmi := obj.(*virtv1.VirtualMachineInstance)
key, err := controller.KeyFunc(vmi)
if err != nil {
logger.Object(vmi).Reason(err).Error("Failed to extract key from virtualmachineinstance.")
return
}
c.Queue.Add(key)
}
// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (c *EvacuationController) resolveControllerRef(namespace string, controllerRef *v1.OwnerReference) *virtv1.VirtualMachineInstance {
// We can't look up by UID, so look up by Name and then verify UID.
// Don't even try to look up by Name if it is nil or the wrong Kind.
if controllerRef == nil || controllerRef.Kind != virtv1.VirtualMachineInstanceGroupVersionKind.Kind {
return nil
}
vmi, exists, err := c.vmiIndexer.GetByKey(controller.NamespacedKey(namespace, controllerRef.Name))
if err != nil {
return nil
}
if !exists {
return nil
}
return vmi.(*virtv1.VirtualMachineInstance)
}
// Run runs the EvacuationController.
func (c *EvacuationController) Run(threadiness int, stopCh <-chan struct{}) {
defer controller.HandlePanic()
defer c.Queue.ShutDown()
log.Log.Info("Starting evacuation controller.")
// Wait for the informer caches to sync before starting the workers
cache.WaitForCacheSync(stopCh, c.hasSynced)
// Start the actual work
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
log.Log.Info("Stopping evacuation controller.")
}
func (c *EvacuationController) runWorker() {
for c.Execute() {
}
}
func (c *EvacuationController) Execute() bool {
key, quit := c.Queue.Get()
if quit {
return false
}
defer c.Queue.Done(key)
err := c.execute(key)
if err != nil {
log.Log.Reason(err).Infof("reenqueuing VirtualMachineInstance %v", key)
c.Queue.AddRateLimited(key)
} else {
log.Log.V(4).Infof("processed VirtualMachineInstance %v", key)
c.Queue.Forget(key)
}
return true
}
func (c *EvacuationController) execute(key string) error {
// Fetch the latest node state from cache
obj, exists, err := c.nodeStore.GetByKey(key)
if err != nil {
return err
}
if !exists {
c.migrationExpectations.DeleteExpectations(key)
return nil
}
if !c.migrationExpectations.SatisfiedExpectations(key) {
return nil
}
node := obj.(*k8sv1.Node)
vmis, err := c.listVMIsOnNode(node.Name)
if err != nil {
return fmt.Errorf("failed to list VMIs on node: %v", err)
}
migrations := migrationutils.ListUnfinishedMigrations(c.migrationIndexer)
return c.sync(node, vmis, migrations)
}
func getMarkedForEvictionVMIs(vmis []*virtv1.VirtualMachineInstance) []*virtv1.VirtualMachineInstance {
var evictionCandidates []*virtv1.VirtualMachineInstance
for _, vmi := range vmis {
if vmi.IsMarkedForEviction() && !hasMigratedOnEviction(vmi) && !migrationutils.IsMigrating(vmi) {
evictionCandidates = append(evictionCandidates, vmi)
}
}
return evictionCandidates
}
func GenerateNewMigration(vmi *virtv1.VirtualMachineInstance, key string, config *virtconfig.ClusterConfig) *virtv1.VirtualMachineInstanceMigration {
annotations := map[string]string{
virtv1.EvacuationMigrationAnnotation: key,
}
mig := &virtv1.VirtualMachineInstanceMigration{
ObjectMeta: v1.ObjectMeta{
Annotations: annotations,
GenerateName: "kubevirt-evacuation-",
},
Spec: virtv1.VirtualMachineInstanceMigrationSpec{
VMIName: vmi.Name,
},
}
if config.MigrationPriorityQueueEnabled() {
mig.Spec.Priority = pointer.P(virtv1.PrioritySystemCritical)
if value, exists := vmi.GetAnnotations()[virtv1.EvictionSourceAnnotation]; exists && value == "descheduler" {
mig.Spec.Priority = pointer.P(virtv1.PrioritySystemMaintenance)
}
}
return mig
}
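// A generated migration looks roughly like this (sketch; the annotation key
// is virtv1.EvacuationMigrationAnnotation and carries the source node name):
//
//	metadata:
//	  generateName: kubevirt-evacuation-
//	  annotations:
//	    <EvacuationMigrationAnnotation>: <node-name>
//	spec:
//	  vmiName: <vmi-name>
//
// When the migration priority queue is enabled, spec.priority is set to
// system-critical, or to system-maintenance for descheduler-driven evictions.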
func (c *EvacuationController) sync(node *k8sv1.Node, vmisOnNode []*virtv1.VirtualMachineInstance, activeMigrations []*virtv1.VirtualMachineInstanceMigration) error {
// If the node has no drain taint, we have nothing to do
taintKey := *c.clusterConfig.GetMigrationConfiguration().NodeDrainTaintKey
taint := &k8sv1.Taint{
Key: taintKey,
Effect: k8sv1.TaintEffectNoSchedule,
}
vmisToMigrate := vmisToMigrate(node, vmisOnNode, taint)
if len(vmisToMigrate) == 0 {
return nil
}
migrationCandidates, nonMigrateable := c.filterRunningNonMigratingVMIs(vmisToMigrate, activeMigrations)
if len(migrationCandidates) == 0 && len(nonMigrateable) == 0 {
return nil
}
selectedCandidates := migrationCandidates
if !c.clusterConfig.MigrationPriorityQueueEnabled() {
runningMigrations := migrationutils.FilterRunningMigrations(activeMigrations)
activeMigrationsFromThisSourceNode := c.numOfVMIMForThisSourceNode(vmisOnNode, runningMigrations)
maxParallelMigrationsPerOutboundNode :=
int(*c.clusterConfig.GetMigrationConfiguration().ParallelOutboundMigrationsPerNode)
maxParallelMigrations := int(*c.clusterConfig.GetMigrationConfiguration().ParallelMigrationsPerCluster)
freeSpotsPerCluster := maxParallelMigrations - len(runningMigrations)
freeSpotsPerThisSourceNode := maxParallelMigrationsPerOutboundNode - activeMigrationsFromThisSourceNode
freeSpots := int(math.Min(float64(freeSpotsPerCluster), float64(freeSpotsPerThisSourceNode)))
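// Worked example (illustrative numbers): with ParallelMigrationsPerCluster=5,
// ParallelOutboundMigrationsPerNode=2, 3 migrations running cluster-wide and
// 1 of them outbound from this node, freeSpotsPerCluster=5-3=2,
// freeSpotsPerThisSourceNode=2-1=1, and freeSpots=min(2,1)=1.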
if freeSpots <= 0 {
c.Queue.AddAfter(node.Name, 5*time.Second)
return nil
}
diff := int(math.Min(float64(freeSpots), float64(len(migrationCandidates))))
remaining := freeSpots - diff
remainingForNonMigrateableDiff := int(math.Min(float64(remaining), float64(len(nonMigrateable))))
if remainingForNonMigrateableDiff > 0 {
// for all non-migrateable VMIs which would get a spot, emit a warning
for _, vmi := range nonMigrateable[0:remainingForNonMigrateableDiff] {
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, FailedCreateVirtualMachineInstanceMigrationReason, "VirtualMachineInstance is not migrateable")
}
}
if diff == 0 {
if remainingForNonMigrateableDiff > 0 {
// Let's ensure that some warnings will stay in the event log and periodically update
// In theory the warnings could disappear after one hour if nothing else updates
c.Queue.AddAfter(node.Name, 1*time.Minute)
}
// nothing to do
return nil
}
// TODO: should the order be randomized?
selectedCandidates = migrationCandidates[0:diff]
}
actualSpots := len(selectedCandidates)
log.DefaultLogger().Infof("node: %v, migrations: %v, candidates: %v, selected: %v", node.Name, len(activeMigrations), len(migrationCandidates), len(selectedCandidates))
wg := &sync.WaitGroup{}
wg.Add(actualSpots)
errChan := make(chan error, actualSpots)
c.migrationExpectations.ExpectCreations(node.Name, actualSpots)
for _, vmi := range selectedCandidates {
go func(vmi *virtv1.VirtualMachineInstance) {
defer wg.Done()
createdMigration, err := c.clientset.VirtualMachineInstanceMigration(vmi.Namespace).Create(context.Background(), GenerateNewMigration(vmi, node.Name, c.clusterConfig), v1.CreateOptions{})
if err != nil {
c.migrationExpectations.CreationObserved(node.Name)
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, FailedCreateVirtualMachineInstanceMigrationReason, "Error creating a Migration: %v", err)
errChan <- err
return
}
c.recorder.Eventf(vmi, k8sv1.EventTypeNormal, SuccessfulCreateVirtualMachineInstanceMigrationReason, "Created Migration %s", createdMigration.Name)
}(vmi)
}
wg.Wait()
select {
case err := <-errChan:
return err
default:
}
return nil
}
func hasMigratedOnEviction(vmi *virtv1.VirtualMachineInstance) bool {
return vmi.Status.NodeName != vmi.Status.EvacuationNodeName
}
func vmisToMigrate(node *k8sv1.Node, vmisOnNode []*virtv1.VirtualMachineInstance, taint *k8sv1.Taint) []*virtv1.VirtualMachineInstance {
var vmisToMigrate []*virtv1.VirtualMachineInstance
if nodeHasTaint(taint, node) {
vmisToMigrate = vmisOnNode
} else if evictedVMIs := getMarkedForEvictionVMIs(vmisOnNode); len(evictedVMIs) > 0 {
vmisToMigrate = evictedVMIs
}
return vmisToMigrate
}
func (c *EvacuationController) listVMIsOnNode(nodeName string) ([]*virtv1.VirtualMachineInstance, error) {
objs, err := c.vmiIndexer.ByIndex("node", nodeName)
if err != nil {
return nil, err
}
vmis := []*virtv1.VirtualMachineInstance{}
for _, obj := range objs {
vmis = append(vmis, obj.(*virtv1.VirtualMachineInstance))
}
return vmis, nil
}
func (c *EvacuationController) filterRunningNonMigratingVMIs(vmis []*virtv1.VirtualMachineInstance, migrations []*virtv1.VirtualMachineInstanceMigration) (migrateable []*virtv1.VirtualMachineInstance, nonMigrateable []*virtv1.VirtualMachineInstance) {
lookup := map[string]bool{}
for _, migration := range migrations {
lookup[migration.Namespace+"/"+migration.Spec.VMIName] = true
}
for _, vmi := range vmis {
// vmi is shutting down
if vmi.IsFinal() || vmi.DeletionTimestamp != nil {
continue
}
// does not want to migrate
if !migrationutils.VMIMigratableOnEviction(c.clusterConfig, vmi) {
continue
}
// can't migrate
if !controller.NewVirtualMachineInstanceConditionManager().HasConditionWithStatus(vmi, virtv1.VirtualMachineInstanceIsMigratable, k8sv1.ConditionTrue) {
nonMigrateable = append(nonMigrateable, vmi)
continue
}
hasMigration := lookup[vmi.Namespace+"/"+vmi.Name]
// already migrating
if hasMigration {
continue
}
if controller.VMIActivePodsCount(vmi, c.vmiPodIndexer) > 1 {
// waiting on target/source pods from a previous migration to terminate
continue
}
// no migration exists,
// the vmi is running,
// only one pod is currently active for vmi
migrateable = append(migrateable, vmi)
}
return migrateable, nonMigrateable
}
// Deprecated: this taint-based node evacuation method is deprecated. Use node
// drain to trigger evictions instead.
func nodeHasTaint(taint *k8sv1.Taint, node *k8sv1.Node) bool {
for _, t := range node.Spec.Taints {
if t.MatchTaint(taint) {
return true
}
}
return false
}
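// For reference, a node tainted for drain looks roughly like the sketch below
// (the key is configurable via NodeDrainTaintKey; kubevirt.io/drain is shown
// here as an assumed default):
//
//	spec:
//	  taints:
//	  - key: kubevirt.io/drain
//	    effect: NoSchedule
//
// sync() matches only the configured key with the NoSchedule effect.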
func (c *EvacuationController) numOfVMIMForThisSourceNode(
vmisOnNode []*virtv1.VirtualMachineInstance,
activeMigrations []*virtv1.VirtualMachineInstanceMigration) (activeMigrationsFromThisSourceNode int) {
vmiMap := make(map[string]bool)
for _, vmi := range vmisOnNode {
vmiMap[vmi.Name] = true
}
for _, vmim := range activeMigrations {
if _, ok := vmiMap[vmim.Spec.VMIName]; ok {
activeMigrationsFromThisSourceNode++
}
}
return
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package migration
import (
"context"
"fmt"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/controller"
)
func (c *Controller) initializeMigrateSourceState(migration *v1.VirtualMachineInstanceMigration, vmi *v1.VirtualMachineInstance) {
if vmi.Status.MigrationState == nil || vmi.IsMigrationCompleted() {
vmi.Status.MigrationState = &v1.VirtualMachineInstanceMigrationState{}
}
if vmi.Status.MigrationState.SourceState == nil {
vmi.Status.MigrationState.SourceState = &v1.VirtualMachineInstanceMigrationSourceState{}
}
if vmi.Status.MigrationState.TargetState == nil {
vmi.Status.MigrationState.TargetState = &v1.VirtualMachineInstanceMigrationTargetState{}
}
vmi.Status.MigrationState.SourceState.MigrationUID = migration.UID
vmi.Status.MigrationState.SourceState.VirtualMachineInstanceUID = &vmi.UID
vmi.Status.MigrationState.TargetState.SyncAddress = &migration.Spec.SendTo.ConnectURL
}
func (c *Controller) initializeMigrateTargetState(migration *v1.VirtualMachineInstanceMigration, vmi *v1.VirtualMachineInstance) {
if vmi.Status.MigrationState == nil || vmi.IsMigrationCompleted() {
vmi.Status.MigrationState = &v1.VirtualMachineInstanceMigrationState{}
}
if vmi.Status.MigrationState.TargetState == nil {
vmi.Status.MigrationState.TargetState = &v1.VirtualMachineInstanceMigrationTargetState{}
}
vmi.Status.MigrationState.TargetState.Pod = vmi.Status.MigrationState.TargetPod
vmi.Status.MigrationState.TargetState.Node = vmi.Status.MigrationState.TargetNode
vmi.Status.MigrationState.TargetState.MigrationUID = migration.UID
}
func (c *Controller) appendMigratedVolume(vmi *v1.VirtualMachineInstance, claimName string, volume v1.Volume) error {
key := controller.NamespacedKey(vmi.Namespace, claimName)
obj, exists, err := c.pvcStore.GetByKey(key)
if err != nil || !exists {
return err
}
pvc := obj.(*k8sv1.PersistentVolumeClaim)
vmi.Status.MigratedVolumes = append(vmi.Status.MigratedVolumes, v1.StorageMigratedVolumeInfo{
VolumeName: volume.Name,
SourcePVCInfo: &v1.PersistentVolumeClaimInfo{
ClaimName: claimName,
AccessModes: pvc.Spec.AccessModes,
VolumeMode: pvc.Spec.VolumeMode,
Requests: pvc.Spec.Resources.Requests,
Capacity: pvc.Status.Capacity,
},
DestinationPVCInfo: &v1.PersistentVolumeClaimInfo{
ClaimName: claimName,
AccessModes: pvc.Spec.AccessModes,
VolumeMode: pvc.Spec.VolumeMode,
Requests: pvc.Spec.Resources.Requests,
Capacity: pvc.Status.Capacity,
},
})
return nil
}
func (c *Controller) patchMigratedVolumesForDecentralizedMigration(vmi *v1.VirtualMachineInstance) error {
vmiCopy := vmi.DeepCopy()
vmiCopy.Status.MigratedVolumes = []v1.StorageMigratedVolumeInfo{}
// Mark all DV/PVC volumes as migrateable in the VMI status.
for _, volume := range vmiCopy.Spec.Volumes {
if volume.PersistentVolumeClaim != nil {
if err := c.appendMigratedVolume(vmiCopy, volume.PersistentVolumeClaim.ClaimName, volume); err != nil {
return err
}
} else if volume.DataVolume != nil {
if err := c.appendMigratedVolume(vmiCopy, volume.DataVolume.Name, volume); err != nil {
return err
}
}
}
patchSet := patch.New()
if !equality.Semantic.DeepEqual(vmiCopy.Status.MigratedVolumes, vmi.Status.MigratedVolumes) {
patchSet.AddOption(
patch.WithTest("/status/migratedVolumes", vmi.Status.MigratedVolumes),
patch.WithReplace("/status/migratedVolumes", vmiCopy.Status.MigratedVolumes),
)
}
if !patchSet.IsEmpty() {
patchBytes, err := patchSet.GeneratePayload()
if err != nil {
return err
}
_, err = c.clientset.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return err
}
}
return nil
}
func (c *Controller) updateVMIMigrationSourceWithPodInfo(migration *v1.VirtualMachineInstanceMigration, vmi *v1.VirtualMachineInstance) error {
if !migration.IsDecentralized() {
return nil
}
vmiCopy := vmi.DeepCopy()
vmiCopy.Status.MigrationState.SourceNode = vmi.Status.NodeName
vmiCopy.Status.MigrationState.SourcePod = migration.Status.MigrationState.SourcePod
vmiCopy.Status.MigrationState.MigrationUID = migration.UID
vmiCopy.Status.MigrationState.SourceState.Node = vmi.Status.NodeName
vmiCopy.Status.MigrationState.SourceState.Pod = migration.Status.MigrationState.SourcePod
vmiCopy.Status.MigrationState.SourceState.PersistentStatePVCName = &migration.Status.MigrationState.SourcePersistentStatePVCName
vmiCopy.Status.MigrationState.SourceState.SelinuxContext = vmi.Status.SelinuxContext
nodeSelectors, err := c.getNodeSelectorsFromNodeName(vmi.Status.NodeName)
if err != nil {
return err
}
vmiCopy.Status.MigrationState.SourceState.NodeSelectors = nodeSelectors
if err := c.patchVMI(vmi, vmiCopy); err != nil {
c.recorder.Eventf(migration, k8sv1.EventTypeWarning, controller.FailedHandOverPodReason, "failed to set migration SourceState in VMI status: %v", err)
return err
}
return nil
}
func (c *Controller) handlePostHandoffMigrationCancel(migration *v1.VirtualMachineInstanceMigration, vmi *v1.VirtualMachineInstance) error {
if !vmi.IsDecentralizedMigration() {
// Do not delete the pod if it is not a decentralized migration
return nil
}
pod, err := controller.CurrentVMIPod(vmi, c.podIndexer)
if err != nil {
return err
}
c.podExpectations.ExpectDeletions(controller.MigrationKey(migration), []string{controller.PodKey(pod)})
if err := c.clientset.CoreV1().Pods(vmi.Namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{}); err != nil {
c.podExpectations.DeletionObserved(controller.MigrationKey(migration), controller.PodKey(pod))
c.recorder.Eventf(migration, k8sv1.EventTypeWarning, controller.FailedDeletePodReason, "Error deleting canceled migration target pod: %v", err)
return err
}
c.recorder.Eventf(migration, k8sv1.EventTypeNormal, controller.SuccessfulDeletePodReason, "Deleted canceled target pod %s for migration %s", pod.Name, migration.Name)
log.Log.Object(vmi).Infof("Deleted canceled target pod %s for migration %s", pod.Name, migration.Name)
return nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package migration
import (
"context"
"errors"
"fmt"
"maps"
"slices"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/opencontainers/selinux/go-selinux"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/utils/trace"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/pointer"
"kubevirt.io/kubevirt/pkg/tpm"
"kubevirt.io/kubevirt/pkg/util"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/api/migrations/v1alpha1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/controller"
workqueuemetrics "kubevirt.io/kubevirt/pkg/monitoring/metrics/common/workqueue"
backendstorage "kubevirt.io/kubevirt/pkg/storage/backend-storage"
storagetypes "kubevirt.io/kubevirt/pkg/storage/types"
migrationsutil "kubevirt.io/kubevirt/pkg/util/migrations"
traceUtils "kubevirt.io/kubevirt/pkg/util/trace"
"kubevirt.io/kubevirt/pkg/virt-controller/services"
"sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue"
)
const (
failedToProcessDeleteNotificationErrMsg = "Failed to process delete notification"
successfulUpdatePodDisruptionBudgetReason = "SuccessfulUpdate"
failedUpdatePodDisruptionBudgetReason = "FailedUpdate"
failedGetAttractionPodsFmt = "failed to get attachment pods: %v"
)
const vmiPodIndex = "vmiPodIndex"
// This is the timeout used when a target pod is stuck in
// a pending unschedulable state.
const defaultUnschedulablePendingTimeoutSeconds = int64(60 * 5)
// This is how many finalized migration objects left in
// the system before we begin garbage collecting the oldest
// migration objects
const defaultFinalizedMigrationGarbageCollectionBuffer = 5
// This catch-all timeout is used when a target pod is stuck in
// the pending phase for any reason. The theory behind this timeout
// being longer than the unschedulable timeout is that we don't necessarily
// know all the reasons a pod will be stuck in pending for an extended
// period of time, so we want to make this timeout long enough that it doesn't
// cause the migration to fail when it could have reasonably succeeded.
const defaultCatchAllPendingTimeoutSeconds = int64(60 * 15)
// This controller is driven by a priority queue, so that proper attention is
// given to active migrations. When a pending migration gets re-enqueued for
// capacity reasons, we need to ensure it doesn't get re-processed as long as
// capacity hasn't freed up, or it will delay processing of active migrations.
// The informer will preserve priorities above 0 but bump negative ones to 0.
var migrationBackoffError = errors.New(controller.MigrationBackoffReason)
type templateService interface {
RenderMigrationManifest(vmi *virtv1.VirtualMachineInstance, migration *virtv1.VirtualMachineInstanceMigration, sourcePod *k8sv1.Pod) (*k8sv1.Pod, error)
RenderLaunchManifest(vmi *virtv1.VirtualMachineInstance) (*k8sv1.Pod, error)
RenderHotplugAttachmentPodTemplate(volumes []*virtv1.Volume, ownerPod *k8sv1.Pod, vmi *virtv1.VirtualMachineInstance, claimMap map[string]*k8sv1.PersistentVolumeClaim) (*k8sv1.Pod, error)
GetLauncherImage() string
}
type networkAnnotationsGenerator interface {
GenerateFromActivePod(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod) map[string]string
}
type Controller struct {
templateService templateService
clientset kubecli.KubevirtClient
Queue priorityqueue.PriorityQueue[string]
vmiStore cache.Store
podIndexer cache.Indexer
migrationIndexer cache.Indexer
nodeStore cache.Store
pvcStore cache.Store
storageClassStore cache.Store
storageProfileStore cache.Store
migrationPolicyStore cache.Store
kubevirtStore cache.Store
resourceQuotaIndexer cache.Indexer
recorder record.EventRecorder
podExpectations *controller.UIDTrackingControllerExpectations
pvcExpectations *controller.UIDTrackingControllerExpectations
migrationStartLock *sync.Mutex
clusterConfig *virtconfig.ClusterConfig
hasSynced func() bool
virtControllerVMIMWorkQueueTracer *traceUtils.Tracer
netAnnotationsGenerator networkAnnotationsGenerator
// the set of cancelled migrations before being handed off to virt-handler.
// the map keys are migration keys
handOffLock sync.Mutex
handOffMap map[string]struct{}
unschedulablePendingTimeoutSeconds int64
catchAllPendingTimeoutSeconds int64
}
func NewController(templateService templateService,
vmiInformer cache.SharedIndexInformer,
podInformer cache.SharedIndexInformer,
migrationInformer cache.SharedIndexInformer,
nodeInformer cache.SharedIndexInformer,
pvcInformer cache.SharedIndexInformer,
storageClassInformer cache.SharedIndexInformer,
storageProfileInformer cache.SharedIndexInformer,
migrationPolicyInformer cache.SharedIndexInformer,
resourceQuotaInformer cache.SharedIndexInformer,
kubevirtInformer cache.SharedIndexInformer,
recorder record.EventRecorder,
clientset kubecli.KubevirtClient,
clusterConfig *virtconfig.ClusterConfig,
netAnnotationsGenerator networkAnnotationsGenerator,
) (*Controller, error) {
c := &Controller{
templateService: templateService,
Queue: priorityqueue.New[string]("virt-controller-migration", func(o *priorityqueue.Opts[string]) {
o.RateLimiter = workqueue.DefaultTypedControllerRateLimiter[string]()
o.MetricProvider = workqueuemetrics.NewPrometheusMetricsProvider()
}),
vmiStore: vmiInformer.GetStore(),
podIndexer: podInformer.GetIndexer(),
migrationIndexer: migrationInformer.GetIndexer(),
nodeStore: nodeInformer.GetStore(),
pvcStore: pvcInformer.GetStore(),
storageClassStore: storageClassInformer.GetStore(),
storageProfileStore: storageProfileInformer.GetStore(),
resourceQuotaIndexer: resourceQuotaInformer.GetIndexer(),
migrationPolicyStore: migrationPolicyInformer.GetStore(),
kubevirtStore: kubevirtInformer.GetStore(),
recorder: recorder,
clientset: clientset,
podExpectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
pvcExpectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
migrationStartLock: &sync.Mutex{},
clusterConfig: clusterConfig,
handOffMap: make(map[string]struct{}),
netAnnotationsGenerator: netAnnotationsGenerator,
unschedulablePendingTimeoutSeconds: defaultUnschedulablePendingTimeoutSeconds,
catchAllPendingTimeoutSeconds: defaultCatchAllPendingTimeoutSeconds,
}
c.virtControllerVMIMWorkQueueTracer = &traceUtils.Tracer{Threshold: time.Second}
c.hasSynced = func() bool {
return vmiInformer.HasSynced() &&
podInformer.HasSynced() &&
migrationInformer.HasSynced() &&
resourceQuotaInformer.HasSynced() &&
kubevirtInformer.HasSynced() &&
storageClassInformer.HasSynced() &&
storageProfileInformer.HasSynced() &&
migrationPolicyInformer.HasSynced() &&
pvcInformer.HasSynced() &&
nodeInformer.HasSynced()
}
_, err := vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addVMI,
DeleteFunc: c.deleteVMI,
UpdateFunc: c.updateVMI,
})
if err != nil {
return nil, err
}
_, err = podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addPod,
DeleteFunc: c.deletePod,
UpdateFunc: c.updatePod,
})
if err != nil {
return nil, err
}
_, err = migrationInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addMigration,
DeleteFunc: c.deleteMigration,
UpdateFunc: c.updateMigration,
})
if err != nil {
return nil, err
}
_, err = pvcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addPVC,
})
if err != nil {
return nil, err
}
_, err = resourceQuotaInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: c.updateResourceQuota,
DeleteFunc: c.deleteResourceQuota,
})
if err != nil {
return nil, err
}
_, err = kubevirtInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: c.updateKubeVirt,
})
if err != nil {
return nil, err
}
return c, addVMIPodIndexer(c.podIndexer)
}
func addVMIPodIndexer(podIndexer cache.Indexer) error {
return podIndexer.AddIndexers(cache.Indexers{
vmiPodIndex: func(obj any) ([]string, error) {
pod, ok := obj.(*k8sv1.Pod)
if !ok {
return nil, nil
}
if pod.Labels == nil {
return nil, nil
}
if value, ok := pod.Labels[virtv1.CreatedByLabel]; ok {
return []string{value}, nil
}
return nil, nil
},
})
}
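// Illustrative usage sketch (not part of the controller): the index added
// above lets a caller fetch all pods created for a VMI in one cache lookup,
// keyed by the pods' CreatedByLabel value (the VMI UID on virt-launcher pods):
//
//	objs, err := podIndexer.ByIndex(vmiPodIndex, string(vmi.UID))
//	if err != nil {
//	    return err
//	}
//	for _, obj := range objs {
//	    pod := obj.(*k8sv1.Pod)
//	    // ...
//	}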
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) {
defer controller.HandlePanic()
defer c.Queue.ShutDown()
log.Log.Info("Starting migration controller.")
// Wait for the informer caches to sync before starting the workers
cache.WaitForCacheSync(stopCh, c.hasSynced)
// Start the actual work
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
log.Log.Info("Stopping migration controller.")
}
func (c *Controller) runWorker() {
for c.Execute() {
}
}
func (c *Controller) Execute() bool {
key, priority, quit := c.Queue.GetWithPriority()
if quit {
return false
}
c.virtControllerVMIMWorkQueueTracer.StartTrace(key, "virt-controller VMIM workqueue", trace.Field{Key: "Workqueue Key", Value: key})
defer c.virtControllerVMIMWorkQueueTracer.StopTrace(key)
defer c.Queue.Done(key)
err := c.execute(key)
if err != nil {
log.Log.Reason(err).Infof("reenqueuing Migration %v", key)
c.Queue.AddWithOpts(priorityqueue.AddOpts{Priority: pointer.P(priority), RateLimited: true}, key)
} else {
log.Log.V(4).Infof("processed Migration %v", key)
c.Queue.Forget(key)
}
return true
}
func ensureSelectorLabelPresent(migration *virtv1.VirtualMachineInstanceMigration) {
if migration.Labels == nil {
migration.Labels = map[string]string{virtv1.MigrationSelectorLabel: migration.Spec.VMIName}
} else if _, exist := migration.Labels[virtv1.MigrationSelectorLabel]; !exist {
migration.Labels[virtv1.MigrationSelectorLabel] = migration.Spec.VMIName
}
}
func (c *Controller) patchVMI(origVMI, newVMI *virtv1.VirtualMachineInstance) error {
patchSet := patch.New()
if !equality.Semantic.DeepEqual(origVMI.Status.MigrationState, newVMI.Status.MigrationState) {
if origVMI.Status.MigrationState == nil {
patchSet.AddOption(
patch.WithTest("/status/migrationState", origVMI.Status.MigrationState),
patch.WithAdd("/status/migrationState", newVMI.Status.MigrationState))
} else {
patchSet.AddOption(
patch.WithTest("/status/migrationState", origVMI.Status.MigrationState),
patch.WithReplace("/status/migrationState", newVMI.Status.MigrationState),
)
}
}
if !equality.Semantic.DeepEqual(origVMI.Labels, newVMI.Labels) {
patchSet.AddOption(
patch.WithTest("/metadata/labels", origVMI.Labels),
patch.WithReplace("/metadata/labels", newVMI.Labels),
)
}
if !patchSet.IsEmpty() {
patchBytes, err := patchSet.GeneratePayload()
if err != nil {
return err
}
log.Log.Object(origVMI).V(4).Infof("patch VMI with %s", string(patchBytes))
if _, err = c.clientset.VirtualMachineInstance(origVMI.Namespace).Patch(context.Background(), origVMI.Name, types.JSONPatchType, patchBytes, v1.PatchOptions{}); err != nil {
return err
}
}
return nil
}
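// execute reconciles a single VirtualMachineInstanceMigration: it resolves the
// migration and its VMI from the caches, performs the phase-specific side
// effects via sync, updates the migration status, and garbage-collects
// finalized migrations once this migration reaches a final phase.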
func (c *Controller) execute(key string) error {
var vmi *virtv1.VirtualMachineInstance
var targetPods []*k8sv1.Pod
// Fetch the latest state from cache
obj, exists, err := c.migrationIndexer.GetByKey(key)
if err != nil {
return err
}
if !exists {
c.podExpectations.DeleteExpectations(key)
c.removeHandOffKey(key)
return nil
}
migration := obj.(*virtv1.VirtualMachineInstanceMigration)
logger := log.Log.Object(migration)
// This must be the first step in execution. Writing the object
// when the API version changes ensures our stored API version is updated.
if !controller.ObservedLatestApiVersionAnnotation(migration) {
migration := migration.DeepCopy()
controller.SetLatestApiVersionAnnotation(migration)
// Ensure the migration contains our selector label
ensureSelectorLabelPresent(migration)
_, err = c.clientset.VirtualMachineInstanceMigration(migration.Namespace).Update(context.Background(), migration, metav1.UpdateOptions{})
return err
}
vmiObj, vmiExists, err := c.vmiStore.GetByKey(fmt.Sprintf("%s/%s", migration.Namespace, migration.Spec.VMIName))
if err != nil {
return err
}
if !vmiExists {
if migration.DeletionTimestamp == nil {
logger.V(3).Infof("Deleting migration for deleted vmi %s/%s", migration.Namespace, migration.Spec.VMIName)
return c.clientset.VirtualMachineInstanceMigration(migration.Namespace).Delete(context.Background(), migration.Name, v1.DeleteOptions{})
}
// nothing to process for a migration that has no VMI
return nil
}
vmi = vmiObj.(*virtv1.VirtualMachineInstance)
targetPods, err = c.listMatchingTargetPods(migration, vmi)
if err != nil {
return err
}
needsSync := c.podExpectations.SatisfiedExpectations(key) && c.pvcExpectations.SatisfiedExpectations(key)
logger.V(4).Infof("processing migration: needsSync %t, hasVMI %t, targetPod len %d", needsSync, vmiExists, len(targetPods))
var syncErr error
if needsSync {
syncErr = c.sync(key, migration, vmi, targetPods)
}
err = c.updateStatus(migration, vmi, targetPods, syncErr)
if err != nil {
return err
}
if syncErr != nil {
return syncErr
}
if migration.IsFinal() {
err = c.garbageCollectFinalizedMigrations(vmi)
if err != nil {
return err
}
if c.clusterConfig.MigrationPriorityQueueEnabled() {
// Re-enqueue the highest-priority pending migrations now that a slot has freed up.
err = c.reEnqueueHighestPriorityPendingMigrations()
if err != nil {
return err
}
}
}
return nil
}
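// reEnqueueHighestPriorityPendingMigrations re-adds pending migrations to the
// queue, highest priority first, up to the cluster-wide parallel migration
// limit. It is called when a migration finishes and a slot frees up.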
func (c *Controller) reEnqueueHighestPriorityPendingMigrations() error {
unfinishedMigrations := migrationsutil.ListUnfinishedMigrations(c.migrationIndexer)
var pendings []*virtv1.VirtualMachineInstanceMigration
for _, m := range unfinishedMigrations {
if m.Status.Phase == virtv1.MigrationPending {
pendings = append(pendings, m)
}
}
sort.Slice(pendings, func(i, j int) bool {
return *migrationsutil.PriorityFromMigration(pendings[i]) > *migrationsutil.PriorityFromMigration(pendings[j])
})
parallelLimit := int(*c.clusterConfig.GetMigrationConfiguration().ParallelMigrationsPerCluster)
for i := 0; i < min(len(pendings), parallelLimit); i++ {
key, err := controller.KeyFunc(pendings[i])
if err != nil {
continue
}
c.Queue.AddWithOpts(priorityqueue.AddOpts{Priority: migrationsutil.PriorityFromMigration(pendings[i])}, key)
}
return nil
}
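// canMigrateVMI reports whether this migration may take ownership of the VMI.
// It returns false only when the VMI's migration state is owned by a different
// migration object that has not reached a final phase yet.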
func (c *Controller) canMigrateVMI(migration *virtv1.VirtualMachineInstanceMigration, vmi *virtv1.VirtualMachineInstance) (bool, error) {
if vmi.Status.MigrationState == nil {
return true, nil
} else if vmi.Status.MigrationState.MigrationUID == migration.UID {
return true, nil
} else if vmi.Status.MigrationState.MigrationUID == "" {
return true, nil
}
curMigrationUID := vmi.Status.MigrationState.MigrationUID
// check to see if the curMigrationUID still exists or is finalized
objs, err := c.migrationIndexer.ByIndex(cache.NamespaceIndex, migration.Namespace)
if err != nil {
return false, err
}
for _, obj := range objs {
curMigration := obj.(*virtv1.VirtualMachineInstanceMigration)
if curMigration.UID != curMigrationUID {
continue
}
if curMigration.IsFinal() {
// If the other job already completed, it's okay to take over the migration.
return true, nil
}
return false, nil
}
return true, nil
}
func (c *Controller) failMigration(migration *virtv1.VirtualMachineInstanceMigration) error {
err := backendstorage.MigrationAbort(c.clientset, migration)
if err != nil {
return err
}
migration.Status.Phase = virtv1.MigrationFailed
return nil
}
func (c *Controller) interruptMigration(migration *virtv1.VirtualMachineInstanceMigration, vmi *virtv1.VirtualMachineInstance) error {
if vmi == nil || !backendstorage.IsBackendStorageNeeded(vmi) {
return c.failMigration(migration)
}
return backendstorage.RecoverFromBrokenMigration(c.clientset, migration, c.pvcStore, vmi, c.templateService.GetLauncherImage())
}
func (c *Controller) updateStatus(migration *virtv1.VirtualMachineInstanceMigration, vmi *virtv1.VirtualMachineInstance, pods []*k8sv1.Pod, syncError error) error {
var pod *k8sv1.Pod = nil
var attachmentPod *k8sv1.Pod = nil
conditionManager := controller.NewVirtualMachineInstanceMigrationConditionManager()
migrationCopy := migration.DeepCopy()
podExists, attachmentPodExists := len(pods) > 0, false
if podExists {
pod = pods[0]
if attachmentPods, err := controller.AttachmentPods(pod, c.podIndexer); err != nil {
return fmt.Errorf(failedGetAttractionPodsFmt, err)
} else {
attachmentPodExists = len(attachmentPods) > 0
if attachmentPodExists {
attachmentPod = attachmentPods[0]
}
}
}
// Status checking of active Migration job.
//
// - Fail if any obvious failure is found
// - Interrupt if something unexpectedly disappeared
// - Begin progressing migration state based on VMI's MigrationState status.
if migration.IsFinal() {
if vmi.IsMigrationSynchronized(migration) && migration.UID == vmi.Status.MigrationState.MigrationUID {
// Store the finalized migration state data from the VMI status in the migration object
migrationCopy.Status.MigrationState = vmi.Status.MigrationState
}
// Remove the finalizer once the migration has completed
controller.RemoveFinalizer(migrationCopy, virtv1.VirtualMachineInstanceMigrationFinalizer)
} else if vmi == nil {
err := c.failMigration(migrationCopy)
if err != nil {
return err
}
c.recorder.Eventf(migration, k8sv1.EventTypeWarning, controller.FailedMigrationReason, "Migration failed because vmi does not exist.")
log.Log.Object(migration).Error("vmi does not exist")
} else if vmi.IsFinal() && !vmi.IsMigrationSource() {
err := c.interruptMigration(migrationCopy, vmi)
if err != nil {
return err
}
c.recorder.Eventf(migration, k8sv1.EventTypeWarning, controller.FailedMigrationReason, "Migration failed because vmi was shut down during migration.")
log.Log.Object(migration).Error("Unable to migrate vmi because it is shut down.")
} else if migration.DeletionTimestamp != nil && !c.isMigrationHandedOff(migration, vmi) {
c.recorder.Eventf(migration, k8sv1.EventTypeWarning, controller.FailedMigrationReason, "Migration failed due to being canceled")
if !conditionManager.HasCondition(migration, virtv1.VirtualMachineInstanceMigrationAbortRequested) {
condition := virtv1.VirtualMachineInstanceMigrationCondition{
Type: virtv1.VirtualMachineInstanceMigrationAbortRequested,
Status: k8sv1.ConditionTrue,
LastProbeTime: v1.Now(),
}
migrationCopy.Status.Conditions = append(migrationCopy.Status.Conditions, condition)
}
err := c.failMigration(migrationCopy)
if err != nil {
return err
}
} else if podExists && controller.PodIsDown(pod) {
err := c.interruptMigration(migrationCopy, vmi)
if err != nil {
return err
}
c.recorder.Eventf(migration, k8sv1.EventTypeWarning, controller.FailedMigrationReason, "Migration failed because target pod shut down during migration")
log.Log.Object(migration).Errorf("target pod %s/%s shut down during migration", pod.Namespace, pod.Name)
if err := c.handlePostHandoffMigrationCancel(migration, vmi); err != nil {
return err
}
} else if migration.TargetIsCreated() && !podExists && migration.IsLocalOrDecentralizedTarget() {
err := c.interruptMigration(migrationCopy, vmi)
if err != nil {
return err
}
c.recorder.Eventf(migration, k8sv1.EventTypeWarning, controller.FailedMigrationReason, "Migration target pod was removed during active migration.")
log.Log.Object(migration).Error("target pod disappeared during migration")
} else if migration.TargetIsHandedOff() && !vmi.IsMigrationSynchronized(migration) {
err := c.failMigration(migrationCopy)
if err != nil {
return err
}
c.recorder.Eventf(migration, k8sv1.EventTypeWarning, controller.FailedMigrationReason, "VMI's migration state was cleared during the active migration.")
log.Log.Object(migration).Error("vmi migration state cleared during migration")
} else if migration.TargetIsHandedOff() &&
vmi.IsMigrationSynchronized(migration) &&
vmi.Status.MigrationState.MigrationUID != migration.UID {
err := c.failMigration(migrationCopy)
if err != nil {
return err
}
c.recorder.Eventf(migration, k8sv1.EventTypeWarning, controller.FailedMigrationReason, "VMI's migration state was taken over by another migration job during active migration.")
log.Log.Object(migration).Error("vmi's migration state was taken over by another migration object")
} else if vmi.IsMigrationSynchronized(migration) &&
vmi.Status.MigrationState.MigrationUID == migration.UID &&
vmi.Status.MigrationState.Failed {
err := c.failMigration(migrationCopy)
if err != nil {
return err
}
c.recorder.Eventf(migration, k8sv1.EventTypeWarning, controller.FailedMigrationReason, "source node reported migration failed")
log.Log.Object(migration).Errorf("VMI %s/%s reported migration failed", vmi.Namespace, vmi.Name)
} else if migration.DeletionTimestamp != nil && !migration.IsFinal() &&
!conditionManager.HasCondition(migration, virtv1.VirtualMachineInstanceMigrationAbortRequested) {
condition := virtv1.VirtualMachineInstanceMigrationCondition{
Type: virtv1.VirtualMachineInstanceMigrationAbortRequested,
Status: k8sv1.ConditionTrue,
LastProbeTime: v1.Now(),
}
migrationCopy.Status.Conditions = append(migrationCopy.Status.Conditions, condition)
} else if attachmentPodExists && controller.PodIsDown(attachmentPod) {
err := c.failMigration(migrationCopy)
if err != nil {
return err
}
c.recorder.Eventf(migration, k8sv1.EventTypeWarning, controller.FailedMigrationReason, "Migration failed because target attachment pod shut down during migration")
log.Log.Object(migration).Errorf("target attachment pod %s/%s shut down during migration", attachmentPod.Namespace, attachmentPod.Name)
} else {
err := c.processMigrationPhase(migration, migrationCopy, pod, attachmentPod, vmi, syncError)
if err != nil {
return err
}
}
controller.SetVMIMigrationPhaseTransitionTimestamp(migration, migrationCopy)
controller.SetSourcePod(migrationCopy, vmi, c.podIndexer)
if err := c.setSynchronizationAddressStatus(migrationCopy); err != nil {
return err
}
if !equality.Semantic.DeepEqual(migration.Status, migrationCopy.Status) {
var err error
migration, err = c.clientset.VirtualMachineInstanceMigration(migrationCopy.Namespace).UpdateStatus(context.Background(), migrationCopy, v1.UpdateOptions{})
if err != nil {
return err
}
}
if !equality.Semantic.DeepEqual(migration.Finalizers, migrationCopy.Finalizers) {
_, err := c.clientset.VirtualMachineInstanceMigration(migrationCopy.Namespace).Update(context.Background(), migrationCopy, metav1.UpdateOptions{})
if err != nil {
return err
}
}
return nil
}
func (c *Controller) setSynchronizationAddressStatus(migration *virtv1.VirtualMachineInstanceMigration) error {
kvs := c.kubevirtStore.List()
if len(kvs) > 1 {
log.Log.Errorf("More than one KubeVirt custom resource detected: %v", len(kvs))
return fmt.Errorf("more than one KubeVirt custom resource detected: %v", len(kvs))
}
if len(kvs) == 1 {
kv, ok := kvs[0].(*virtv1.KubeVirt)
if !ok {
log.Log.Errorf("found unknown object in kubevirt store %v", kvs[0])
return fmt.Errorf("found unknown object in kubevirt store %v", kvs[0])
}
migration.Status.SynchronizationAddresses = kv.Status.SynchronizationAddresses
}
return nil
}
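// processMigrationPhase advances the migration phase state machine on
// migrationCopy, based on the current phase, the state of the target pod and
// attachment pod, and the migration state reported on the VMI.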
func (c *Controller) processMigrationPhase(
migration, migrationCopy *virtv1.VirtualMachineInstanceMigration,
pod, attachmentPod *k8sv1.Pod,
vmi *virtv1.VirtualMachineInstance,
syncError error,
) error {
conditionManager := controller.NewVirtualMachineInstanceMigrationConditionManager()
vmiConditionManager := controller.NewVirtualMachineInstanceConditionManager()
switch migration.Status.Phase {
case virtv1.MigrationPhaseUnset:
canMigrate, err := c.canMigrateVMI(migration, vmi)
if err != nil {
return err
}
if canMigrate {
if migration.IsDecentralizedTarget() {
migrationCopy.Status.Phase = virtv1.MigrationWaitingForSync
} else if migration.IsDecentralizedSource() {
migrationCopy.Status.Phase = virtv1.MigrationSynchronizing
} else {
migrationCopy.Status.Phase = virtv1.MigrationPending
}
} else {
// Cannot migrate because another migration is already
// in progress for this VMI.
err := c.failMigration(migrationCopy)
if err != nil {
return err
}
c.recorder.Eventf(migration, k8sv1.EventTypeWarning, controller.FailedMigrationReason, "VMI is not eligible for migration because another migration job is in progress.")
log.Log.Object(migration).Error("Migration object not eligible for migration because another job is in progress")
}
case virtv1.MigrationPending:
if hasUtilityVolumes, err := c.handleUtilityVolumes(migrationCopy, vmi); err != nil || hasUtilityVolumes {
return err
}
if migration.IsLocalOrDecentralizedTarget() {
if pod != nil {
if controller.VMIHasHotplugVolumes(vmi) {
if attachmentPod != nil && controller.IsPodReady(attachmentPod) {
migrationCopy.Status.Phase = virtv1.MigrationScheduling
}
} else {
migrationCopy.Status.Phase = virtv1.MigrationScheduling
}
} else if syncError != nil && strings.Contains(syncError.Error(), "exceeded quota") && !conditionManager.HasCondition(migration, virtv1.VirtualMachineInstanceMigrationRejectedByResourceQuota) {
condition := virtv1.VirtualMachineInstanceMigrationCondition{
Type: virtv1.VirtualMachineInstanceMigrationRejectedByResourceQuota,
Status: k8sv1.ConditionTrue,
LastProbeTime: v1.Now(),
}
migrationCopy.Status.Conditions = append(migrationCopy.Status.Conditions, condition)
}
} else {
if migration.IsDecentralizedSource() && vmi.IsRunning() {
// Decentralized source migration, switch to scheduling.
migrationCopy.Status.Phase = virtv1.MigrationScheduling
}
}
case virtv1.MigrationWaitingForSync:
if vmi.IsMigrationSourceSynchronized() {
migrationCopy.Status.Phase = virtv1.MigrationPending
}
case virtv1.MigrationSynchronizing:
if vmi.IsMigrationSynchronized(migration) {
// Sync happened, switch to MigrationPending
migrationCopy.Status.Phase = virtv1.MigrationPending
}
case virtv1.MigrationScheduling:
if conditionManager.HasCondition(migrationCopy, virtv1.VirtualMachineInstanceMigrationRejectedByResourceQuota) {
conditionManager.RemoveCondition(migrationCopy, virtv1.VirtualMachineInstanceMigrationRejectedByResourceQuota)
}
if migration.IsDecentralizedSource() {
if err := c.patchMigratedVolumesForDecentralizedMigration(vmi); err != nil {
return err
}
if vmi.Status.MigrationState.TargetState.Pod != "" {
migrationCopy.Status.Phase = virtv1.MigrationScheduled
}
} else if pod != nil && controller.IsPodReady(pod) {
if controller.VMIHasHotplugVolumes(vmi) {
if attachmentPod != nil && controller.IsPodReady(attachmentPod) {
log.Log.Object(migration).V(5).Infof("attachment pod %s for vmi %s/%s is ready", attachmentPod.Name, vmi.Namespace, vmi.Name)
migrationCopy.Status.Phase = virtv1.MigrationScheduled
}
} else {
migrationCopy.Status.Phase = virtv1.MigrationScheduled
}
}
case virtv1.MigrationScheduled:
if vmi.IsTargetPreparing(migration) {
migrationCopy.Status.Phase = virtv1.MigrationPreparingTarget
}
case virtv1.MigrationPreparingTarget:
if (migration.IsLocalOrDecentralizedSource() && vmi.IsMigrationSourceSynchronized() && vmi.Status.MigrationState.TargetState.NodeAddress != nil) ||
(migration.IsLocalOrDecentralizedTarget() && vmi.Status.MigrationState.TargetNode != "" && vmi.Status.MigrationState.TargetNodeAddress != "") {
migrationCopy.Status.Phase = virtv1.MigrationTargetReady
}
case virtv1.MigrationTargetReady:
if vmi.Status.MigrationState.StartTimestamp != nil {
migrationCopy.Status.Phase = virtv1.MigrationRunning
}
case virtv1.MigrationRunning:
if migration.IsLocalOrDecentralizedTarget() {
_, exists := pod.Annotations[virtv1.MigrationTargetReadyTimestamp]
if !exists && vmi.Status.MigrationState.TargetNodeDomainReadyTimestamp != nil {
if backendstorage.IsBackendStorageNeeded(vmi) {
err := backendstorage.MigrationHandoff(c.clientset, c.pvcStore, migration)
if err != nil {
return err
}
}
patchBytes, err := patch.New(
patch.WithAdd(fmt.Sprintf("/metadata/annotations/%s", patch.EscapeJSONPointer(virtv1.MigrationTargetReadyTimestamp)), vmi.Status.MigrationState.TargetNodeDomainReadyTimestamp.String()),
).GeneratePayload()
if err != nil {
return err
}
if _, err = c.clientset.CoreV1().Pods(pod.Namespace).Patch(context.Background(), pod.Name, types.JSONPatchType, patchBytes, v1.PatchOptions{}); err != nil {
return err
}
}
}
log.Log.Object(vmi).V(4).Infof("is migration completed: %t, uid %s", vmi.IsMigrationCompleted(), vmi.UID)
if vmi.Status.MigrationState.Completed &&
!vmiConditionManager.HasCondition(vmi, virtv1.VirtualMachineInstanceVCPUChange) &&
!vmiConditionManager.HasConditionWithStatus(vmi, virtv1.VirtualMachineInstanceMemoryChange, k8sv1.ConditionTrue) &&
!vmiConditionManager.HasConditionWithStatus(vmi, virtv1.VirtualMachineInstanceMigrationRequired, k8sv1.ConditionTrue) {
migrationCopy.Status.Phase = virtv1.MigrationSucceeded
c.recorder.Eventf(migration, k8sv1.EventTypeNormal, controller.SuccessfulMigrationReason, "Source node reported migration succeeded")
}
}
return nil
}
func setTargetPodSELinuxLevel(pod *k8sv1.Pod, vmiSeContext string) error {
// The target pod may share resources with the source pod (RWX disks, for example).
// Therefore, it needs the same SELinux categories to inherit the same permissions.
// Note: there is a small probability that the target pod will share the same categories as another pod on its node.
// That is a slight security concern, but not as bad as removing categories on all shared objects for the duration of the migration.
if vmiSeContext == "none" {
// The SelinuxContext is explicitly set to "none" when SELinux is not present
return nil
}
if vmiSeContext == "" {
return fmt.Errorf("SELinux context not set on VMI status")
} else {
seContext, err := selinux.NewContext(vmiSeContext)
if err != nil {
return err
}
level, exists := seContext["level"]
if exists && level != "" {
// The SELinux context looks like "system_u:object_r:container_file_t:s0:c1,c2", we care about "s0:c1,c2"
if pod.Spec.SecurityContext == nil {
pod.Spec.SecurityContext = &k8sv1.PodSecurityContext{}
}
pod.Spec.SecurityContext.SELinuxOptions = &k8sv1.SELinuxOptions{
Level: level,
}
}
}
return nil
}
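// createMigrationPodAntiAffinityRule adds a required pod anti-affinity term on
// the hostname topology key, ensuring the migration target pod is scheduled on
// a different node than the source pod of the same VMI.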
func createMigrationPodAntiAffinityRule(templatePod *k8sv1.Pod, vmi *virtv1.VirtualMachineInstance) {
antiAffinityTerm := k8sv1.PodAffinityTerm{
LabelSelector: &v1.LabelSelector{
MatchLabels: map[string]string{
virtv1.CreatedByLabel: string(vmi.UID),
},
},
TopologyKey: k8sv1.LabelHostname,
}
antiAffinityRule := &k8sv1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []k8sv1.PodAffinityTerm{antiAffinityTerm},
}
if templatePod.Spec.Affinity == nil {
templatePod.Spec.Affinity = &k8sv1.Affinity{
PodAntiAffinity: antiAffinityRule,
}
} else if templatePod.Spec.Affinity.PodAntiAffinity == nil {
templatePod.Spec.Affinity.PodAntiAffinity = antiAffinityRule
} else {
templatePod.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(templatePod.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, antiAffinityTerm)
}
}
func createDecentralizedMigrationPodAntiAffinity(templatePod *k8sv1.Pod, vmi *virtv1.VirtualMachineInstance) {
// Set node anti-affinity rules so the migration target pod
// cannot be scheduled on the source node.
if templatePod.Spec.Affinity == nil {
templatePod.Spec.Affinity = &k8sv1.Affinity{}
}
if templatePod.Spec.Affinity.NodeAffinity == nil {
templatePod.Spec.Affinity.NodeAffinity = &k8sv1.NodeAffinity{}
}
if templatePod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
templatePod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &k8sv1.NodeSelector{}
}
if len(templatePod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) == 0 {
// Use a len check rather than a nil check so a non-nil empty slice cannot
// cause an index-out-of-range panic on the [0] access below.
templatePod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = []k8sv1.NodeSelectorTerm{{}}
}
templatePod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions = append(templatePod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions,
k8sv1.NodeSelectorRequirement{
Key: k8sv1.LabelHostname,
Operator: k8sv1.NodeSelectorOpNotIn,
Values: []string{vmi.Status.MigrationState.SourceState.Node},
},
)
}
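// createTargetPod renders the migration target pod manifest and creates it,
// after applying anti-affinity rules, node selectors (host-model CPU and CPU
// vendor constraints), the source's SELinux level, and migration labels.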
func (c *Controller) createTargetPod(migration *virtv1.VirtualMachineInstanceMigration, vmi *virtv1.VirtualMachineInstance, sourcePod *k8sv1.Pod) error {
if !c.pvcExpectations.SatisfiedExpectations(controller.MigrationKey(migration)) {
// Give time to the PVC informer to update itself
return nil
}
selinuxContext := vmi.Status.SelinuxContext
templatePod, err := c.templateService.RenderMigrationManifest(vmi, migration, sourcePod)
if err != nil {
return fmt.Errorf("failed to render launch manifest: %v", err)
}
if migration.IsDecentralizedTarget() {
createDecentralizedMigrationPodAntiAffinity(templatePod, vmi)
selinuxContext = vmi.Status.MigrationState.SourceState.SelinuxContext
} else {
createMigrationPodAntiAffinityRule(templatePod, vmi)
}
nodeSelector := make(map[string]string)
maps.Copy(nodeSelector, migration.Spec.AddedNodeSelector)
maps.Copy(nodeSelector, templatePod.Spec.NodeSelector)
templatePod.Spec.NodeSelector = nodeSelector
templatePod.ObjectMeta.Labels[virtv1.MigrationJobLabel] = string(migration.UID)
templatePod.ObjectMeta.Annotations[virtv1.MigrationJobNameAnnotation] = migration.Name
// If the CPU model is "host-model", allow migration only to nodes that support this CPU model
if cpu := vmi.Spec.Domain.CPU; cpu != nil && cpu.Model == virtv1.CPUModeHostModel {
var nodeSelectors map[string]string
if migration.IsDecentralizedTarget() {
nodeSelectors, err = getNodeSelectorsFromVMIMigrationSourceState(vmi.Status.MigrationState.SourceState)
} else {
// Assign to the outer err so the shared error check below also catches
// failures from prepareNodeSelectorForHostCpuModel in this branch.
var node *k8sv1.Node
node, err = c.getNodeForVMI(vmi)
if err != nil {
return err
}
nodeSelectors, err = prepareNodeSelectorForHostCpuModel(node, templatePod, sourcePod.Spec.NodeSelector)
}
if err != nil {
return err
}
for k, v := range nodeSelectors {
templatePod.Spec.NodeSelector[k] = v
}
}
// Ensure migration happens only between nodes with the same CPU vendor
// This prevents migrations between AMD and Intel nodes which are not supported
vendorLabelKey := getCPUVendorLabelKey(templatePod.Spec.NodeSelector)
if vendorLabelKey == "" {
var sourceLabels map[string]string
if migration.IsDecentralizedTarget() {
sourceLabels = vmi.Status.MigrationState.SourceState.NodeSelectors
} else {
node, err := c.getNodeForVMI(vmi)
if err != nil {
return err
}
sourceLabels = node.Labels
}
vendorLabelKey = getCPUVendorLabelKey(sourceLabels)
if vendorLabelKey != "" {
templatePod.Spec.NodeSelector[vendorLabelKey] = "true"
}
}
matchLevelOnTarget := c.clusterConfig.GetMigrationConfiguration().MatchSELinuxLevelOnMigration
if matchLevelOnTarget == nil || *matchLevelOnTarget {
err = setTargetPodSELinuxLevel(templatePod, selinuxContext)
if err != nil {
return err
}
}
// This is used by the functional test to simulate failures
computeImageOverride, ok := migration.Annotations[virtv1.FuncTestMigrationTargetImageOverrideAnnotation]
if ok && computeImageOverride != "" {
for i, container := range templatePod.Spec.Containers {
if container.Name == "compute" {
container.Image = computeImageOverride
templatePod.Spec.Containers[i] = container
break
}
}
}
key := controller.MigrationKey(migration)
c.podExpectations.ExpectCreations(key, 1)
pod, err := c.clientset.CoreV1().Pods(vmi.GetNamespace()).Create(context.Background(), templatePod, v1.CreateOptions{})
if err != nil {
if k8serrors.IsForbidden(err) && strings.Contains(err.Error(), "violates PodSecurity") {
err = fmt.Errorf("failed to create target pod for vmi %s/%s, it needs a privileged namespace to run: %w", vmi.GetNamespace(), vmi.GetName(), err)
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, controller.FailedCreatePodReason, services.FailedToRenderLaunchManifestErrFormat, err)
} else {
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, controller.FailedCreatePodReason, "Error creating pod: %v", err)
err = fmt.Errorf("failed to create vmi migration target pod: %v", err)
}
c.podExpectations.CreationObserved(key)
return err
}
log.Log.Object(vmi).V(5).Infof("Created migration target pod %s/%s with uuid %s for migration %s with uuid %s", pod.Namespace, pod.Name, string(pod.UID), migration.Name, string(migration.UID))
c.recorder.Eventf(migration, k8sv1.EventTypeNormal, controller.SuccessfulCreatePodReason, "Created migration target pod %s", pod.Name)
return nil
}
// handleMigrationBackoff introduces a backoff (when needed) only for migrations
// created by the evacuation controller or the workload updater.
func (c *Controller) handleMigrationBackoff(key string, vmi *virtv1.VirtualMachineInstance, migration *virtv1.VirtualMachineInstanceMigration) error {
if _, exists := migration.Annotations[virtv1.FuncTestForceIgnoreMigrationBackoffAnnotation]; exists {
return nil
}
_, existsEvacMig := migration.Annotations[virtv1.EvacuationMigrationAnnotation]
_, existsWorkUpdMig := migration.Annotations[virtv1.WorkloadUpdateMigrationAnnotation]
if !existsEvacMig && !existsWorkUpdMig {
return nil
}
migrations, err := c.listBackoffEligibleMigrations(vmi.Namespace, vmi.Name)
if err != nil {
return err
}
if len(migrations) < 2 {
return nil
}
// Newest first
sort.Sort(sort.Reverse(vmimCollection(migrations)))
if migrations[0].UID != migration.UID {
return nil
}
backoff := time.Second * 0
for _, m := range migrations[1:] {
if m.Status.Phase == virtv1.MigrationSucceeded {
break
}
if m.DeletionTimestamp != nil {
continue
}
if m.Status.Phase == virtv1.MigrationFailed {
if backoff == 0 {
backoff = time.Second * 20
} else {
backoff = backoff * 2
}
}
}
if backoff == 0 {
return nil
}
getFailedTS := func(migration *virtv1.VirtualMachineInstanceMigration) metav1.Time {
for _, ts := range migration.Status.PhaseTransitionTimestamps {
if ts.Phase == virtv1.MigrationFailed {
return ts.PhaseTransitionTimestamp
}
}
return metav1.Time{}
}
outOfBackoffTS := getFailedTS(migrations[1]).Add(backoff)
backoff = time.Until(outOfBackoffTS)
if backoff > 0 {
log.Log.Object(vmi).Errorf("vmi in migration backoff, re-enqueueing after %v", backoff)
c.Queue.AddWithOpts(priorityqueue.AddOpts{Priority: pointer.P(migrationsutil.QueuePriorityRunning), After: backoff}, key)
return migrationBackoffError
}
return nil
}
func (c *Controller) handleMarkMigrationFailedOnVMI(migration *virtv1.VirtualMachineInstanceMigration, vmi *virtv1.VirtualMachineInstance) error {
// Mark Migration Done on VMI if virt handler never started it.
// Once virt-handler starts the migration, it's up to handler
// to finalize it.
vmiCopy := vmi.DeepCopy()
now := v1.NewTime(time.Now())
vmiCopy.Status.MigrationState.StartTimestamp = &now
vmiCopy.Status.MigrationState.EndTimestamp = &now
vmiCopy.Status.MigrationState.Failed = true
vmiCopy.Status.MigrationState.Completed = true
failureReason := "Target pod is down"
if vmiCopy.Status.MigrationState.FailureReason == "" {
// Only set the failure reason if empty, as virt-handler may already have provided a better one.
// This must happen before the patch below, otherwise the reason would never be persisted.
vmiCopy.Status.MigrationState.FailureReason = failureReason
}
err := c.patchVMI(vmi, vmiCopy)
if err != nil {
log.Log.Reason(err).Object(vmi).Errorf("Failed to patch VMI status to indicate migration %s/%s failed.", migration.Namespace, migration.Name)
return err
}
log.Log.Object(vmi).Infof("Marked Migration %s/%s failed on vmi due to target pod disappearing before migration kicked off.", migration.Namespace, migration.Name)
c.recorder.Event(vmi, k8sv1.EventTypeWarning, controller.FailedMigrationReason, fmt.Sprintf("VirtualMachineInstance migration uid %s failed. reason: %s", string(migration.UID), failureReason))
return nil
}
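// handlePreHandoffMigrationCancel deletes the pending target pod of a
// migration that was canceled before it was handed off to virt-handler.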
func (c *Controller) handlePreHandoffMigrationCancel(migration *virtv1.VirtualMachineInstanceMigration, vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod) error {
if pod == nil {
return nil
}
c.podExpectations.ExpectDeletions(controller.MigrationKey(migration), []string{controller.PodKey(pod)})
err := c.clientset.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, v1.DeleteOptions{})
if err != nil {
c.podExpectations.DeletionObserved(controller.MigrationKey(migration), controller.PodKey(pod))
c.recorder.Eventf(migration, k8sv1.EventTypeWarning, controller.FailedDeletePodReason, "Error deleting canceled migration target pod: %v", err)
return fmt.Errorf("cannot delete pending target pod %s/%s for migration although migration is aborted", pod.Name, pod.Namespace)
}
reason := fmt.Sprintf("migration canceled and pod %s/%s is deleted", pod.Namespace, pod.Name)
log.Log.Object(vmi).Infof("Deleted pending migration target pod with uuid %s for migration %s with uuid %s with reason [%s]", string(pod.UID), migration.Name, string(migration.UID), reason)
c.recorder.Event(migration, k8sv1.EventTypeNormal, controller.SuccessfulDeletePodReason, reason)
return nil
}
func (c *Controller) getNodeSelectorsFromNodeName(nodeName string) (map[string]string, error) {
obj, exists, err := c.nodeStore.GetByKey(nodeName)
if err != nil {
return nil, err
}
res := make(map[string]string)
if exists {
node := obj.(*k8sv1.Node)
for key, value := range node.Labels {
if strings.HasPrefix(key, virtv1.HostModelCPULabel) || strings.HasPrefix(key, virtv1.HostModelRequiredFeaturesLabel) || strings.HasPrefix(key, virtv1.CPUModelVendorLabel) {
res[key] = value
}
}
}
return res, nil
}
// updateTargetPodNetworkInfo generates the network-info annotation from the target pod's network-status
func (c *Controller) updateTargetPodNetworkInfo(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod) error {
newAnnotations := c.netAnnotationsGenerator.GenerateFromActivePod(vmi, pod)
_, err := controller.SyncPodAnnotations(c.clientset, pod, newAnnotations)
return err
}
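// handleTargetPodHandoff records the target pod and node in the VMI's
// migration state and labels the VMI with the target node name, which causes
// virt-handler on that node to start preparing the migration target.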
func (c *Controller) handleTargetPodHandoff(migration *virtv1.VirtualMachineInstanceMigration, vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod) error {
if vmi.IsMigrationSynchronized(migration) && vmi.Status.MigrationState.MigrationUID == migration.UID {
// already handed off
return nil
}
vmiCopy := vmi.DeepCopy()
if vmiCopy.Status.MigrationState == nil || !migration.IsDecentralized() {
vmiCopy.Status.MigrationState = &virtv1.VirtualMachineInstanceMigrationState{}
} else {
vmiCopy.Status.MigrationState.Completed = false
vmiCopy.Status.MigrationState.Failed = false
}
vmiCopy.Status.MigrationState.MigrationUID = migration.UID
vmiCopy.Status.MigrationState.TargetNode = pod.Spec.NodeName
vmiCopy.Status.MigrationState.SourceNode = vmi.Status.NodeName
vmiCopy.Status.MigrationState.TargetPod = pod.Name
if migration.IsDecentralized() {
vmiCopy.Status.MigrationState.TargetState.MigrationUID = migration.UID
vmiCopy.Status.MigrationState.TargetState.Node = pod.Spec.NodeName
vmiCopy.Status.MigrationState.TargetState.Pod = pod.Name
vmiCopy.Status.MigrationState.TargetState.VirtualMachineInstanceUID = &vmi.UID
vmiCopy.Status.MigrationState.TargetState.DomainNamespace = &vmi.Namespace
vmiCopy.Status.MigrationState.TargetState.DomainName = &vmi.Name
vmiCopy.Status.MigrationState.SourceNode = vmiCopy.Status.MigrationState.SourceState.Node
}
if migration.Status.MigrationState != nil {
vmiCopy.Status.MigrationState.SourcePod = migration.Status.MigrationState.SourcePod
vmiCopy.Status.MigrationState.SourcePersistentStatePVCName = migration.Status.MigrationState.SourcePersistentStatePVCName
vmiCopy.Status.MigrationState.TargetPersistentStatePVCName = migration.Status.MigrationState.TargetPersistentStatePVCName
}
// By setting this label, virt-handler on the target node will receive
// the vmi and prepare the local environment for the migration
if vmiCopy.ObjectMeta.Labels == nil {
vmiCopy.ObjectMeta.Labels = make(map[string]string)
}
vmiCopy.ObjectMeta.Labels[virtv1.MigrationTargetNodeNameLabel] = pod.Spec.NodeName
if controller.VMIHasHotplugVolumes(vmiCopy) {
attachmentPods, err := controller.AttachmentPods(pod, c.podIndexer)
if err != nil {
return fmt.Errorf(failedGetAttractionPodsFmt, err)
}
if len(attachmentPods) > 0 {
log.Log.Object(migration).Infof("Target attachment pod for vmi %s/%s: %s", vmiCopy.Namespace, vmiCopy.Name, string(attachmentPods[0].UID))
vmiCopy.Status.MigrationState.TargetAttachmentPodUID = attachmentPods[0].UID
} else {
return fmt.Errorf("target attachment pod not found")
}
}
clusterMigrationConfigs := c.clusterConfig.GetMigrationConfiguration().DeepCopy()
err := c.matchMigrationPolicy(vmiCopy, clusterMigrationConfigs)
if err != nil {
return fmt.Errorf("failed to match migration policy: %v", err)
}
if !c.isMigrationPolicyMatched(vmiCopy) {
vmiCopy.Status.MigrationState.MigrationConfiguration = clusterMigrationConfigs
}
if controller.VMIHasHotplugCPU(vmi) && vmi.IsCPUDedicated() {
cpuLimitsCount, err := getTargetPodLimitsCount(pod)
if err != nil {
return err
}
vmiCopy.ObjectMeta.Labels[virtv1.VirtualMachinePodCPULimitsLabel] = strconv.Itoa(int(cpuLimitsCount))
}
if controller.VMIHasHotplugMemory(vmi) {
memoryReq, err := getTargetPodMemoryRequests(pod)
if err != nil {
return err
}
vmiCopy.ObjectMeta.Labels[virtv1.VirtualMachinePodMemoryRequestsLabel] = memoryReq
}
if backendStoragePVC := backendstorage.PVCForMigrationTarget(c.pvcStore, migration); backendStoragePVC != nil {
bs := backendstorage.NewBackendStorage(c.clientset, c.clusterConfig, c.storageClassStore, c.storageProfileStore, c.pvcStore)
bs.UpdateVolumeStatus(vmiCopy, backendStoragePVC)
}
err = c.patchVMI(vmi, vmiCopy)
if err != nil {
c.recorder.Eventf(migration, k8sv1.EventTypeWarning, controller.FailedHandOverPodReason, "Failed to set MigrationState in VMI status: %v", err)
return err
}
c.addHandOffKey(controller.MigrationKey(migration))
log.Log.Object(vmi).Infof("Handed off migration %s/%s to target virt-handler.", migration.Namespace, migration.Name)
c.recorder.Eventf(migration, k8sv1.EventTypeNormal, controller.SuccessfulHandOverPodReason, "Migration target pod is ready for preparation by virt-handler.")
return nil
}
func (c *Controller) markMigrationAbortInVmiStatus(migration *virtv1.VirtualMachineInstanceMigration, vmi *virtv1.VirtualMachineInstance) error {
if vmi.Status.MigrationState == nil {
return fmt.Errorf("migration state is nil when trying to mark migratio abortion in vmi status")
}
vmiCopy := vmi.DeepCopy()
vmiCopy.Status.MigrationState.AbortRequested = true
if !equality.Semantic.DeepEqual(vmi.Status, vmiCopy.Status) {
newStatus := vmiCopy.Status
oldStatus := vmi.Status
patchBytes, err := patch.New(
patch.WithTest("/status", oldStatus),
patch.WithReplace("/status", newStatus),
).GeneratePayload()
if err != nil {
return err
}
_, err = c.clientset.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType, patchBytes, v1.PatchOptions{})
if err != nil {
msg := fmt.Sprintf("failed to set MigrationState in VMI status. :%v", err)
c.recorder.Eventf(migration, k8sv1.EventTypeWarning, controller.FailedAbortMigrationReason, msg)
return fmt.Errorf("%s", msg)
}
log.Log.Object(vmi).Infof("Signaled migration %s/%s to be aborted.", migration.Namespace, migration.Name)
c.recorder.Eventf(migration, k8sv1.EventTypeNormal, controller.SuccessfulAbortMigrationReason, "Migration is ready to be canceled by virt-handler.")
}
return nil
}
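// handleTargetPodCreation creates the migration target pod, but only after
// checking the cluster-wide and per-node parallel migration limits; when a
// limit is hit, the key is re-enqueued with a delay instead.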
func (c *Controller) handleTargetPodCreation(key string, migration *virtv1.VirtualMachineInstanceMigration, vmi *virtv1.VirtualMachineInstance, sourcePod *k8sv1.Pod) error {
c.migrationStartLock.Lock()
defer c.migrationStartLock.Unlock()
// Don't start new migrations while we wait for cache updates on migration target pods
if c.podExpectations.AllPendingCreations() > 0 {
c.Queue.AddWithOpts(priorityqueue.AddOpts{Priority: pointer.P(migrationsutil.QueuePriorityRunning), After: 1 * time.Second}, key)
return nil
} else if controller.VMIActivePodsCount(vmi, c.podIndexer) > 1 {
log.Log.Object(migration).Infof("Waiting to schedule target pod for migration because there are already multiple pods running for vmi %s/%s", vmi.Namespace, vmi.Name)
c.Queue.AddWithOpts(priorityqueue.AddOpts{Priority: pointer.P(migrationsutil.QueuePriorityRunning), After: 1 * time.Second}, key)
return nil
}
// Don't start new migrations while we wait for migration object updates caused by new target pods
runningMigrations, err := c.findRunningMigrations()
if err != nil {
return fmt.Errorf("failed to determin the number of running migrations: %v", err)
}
log.Log.V(3).Infof("number of running migrations: %d", len(runningMigrations))
// XXX: Make this configurable, think about limit per node, bandwidth per migration, and so on.
if len(runningMigrations) >= int(*c.clusterConfig.GetMigrationConfiguration().ParallelMigrationsPerCluster) {
log.Log.Object(migration).Infof("Waiting to schedule target pod for vmi [%s/%s] migration because total running parallel migration count [%d] is currently at the global cluster limit.", vmi.Namespace, vmi.Name, len(runningMigrations))
// The controller is busy with active migrations, mark ourselves as low priority to give more cycles to those
if c.clusterConfig.MigrationPriorityQueueEnabled() {
priority := migrationsutil.PriorityFromMigration(migration)
delay := getRequeueDelayForPriority(*priority)
c.Queue.AddWithOpts(priorityqueue.AddOpts{Priority: priority, After: delay}, key)
} else {
c.Queue.AddWithOpts(priorityqueue.AddOpts{Priority: pointer.P(migrationsutil.QueuePriorityPending), After: 5 * time.Second}, key)
}
return nil
}
outboundMigrations := c.outboundMigrationsOnNode(vmi.Status.NodeName, runningMigrations)
if outboundMigrations >= int(*c.clusterConfig.GetMigrationConfiguration().ParallelOutboundMigrationsPerNode) {
// Ensure that we respect the configured limit of outbound migrations per node
// XXX: Make this configurable, think about inbound migration limit, bandwidth per migration, and so on.
log.Log.Object(migration).Infof("Waiting to schedule target pod for vmi [%s/%s] migration because total running parallel outbound migrations on target node [%d] has hit outbound migrations per node limit.", vmi.Namespace, vmi.Name, outboundMigrations)
// The controller is busy with active migrations, mark ourselves as low priority to give more cycles to those
if c.clusterConfig.MigrationPriorityQueueEnabled() {
priority := migrationsutil.PriorityFromMigration(migration)
delay := getRequeueDelayForPriority(*priority)
c.Queue.AddWithOpts(priorityqueue.AddOpts{Priority: priority, After: delay}, key)
} else {
c.Queue.AddWithOpts(priorityqueue.AddOpts{Priority: pointer.P(migrationsutil.QueuePriorityPending), After: 5 * time.Second}, key)
}
return nil
}
// migration was accepted into the system, now see if we
// should create the target pod
if vmi.IsRunning() || migration.IsDecentralizedTarget() {
err = c.handleBackendStorage(migration, vmi)
if err != nil {
return err
}
return c.createTargetPod(migration, vmi, sourcePod)
}
log.Log.Object(vmi).V(5).Info("target pod not created because vmi is not running and migration is not decentralized target migration")
return nil
}
func getRequeueDelayForPriority(priority int) time.Duration {
switch {
case priority >= migrationsutil.QueuePrioritySystemCritical:
return 1 * time.Second
case priority >= migrationsutil.QueuePriorityUserTriggered:
return 3 * time.Second
default:
return 5 * time.Second // the lowest as it was before
}
}
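// handleBackendStorage makes sure the migration knows the persistent-state
// (backend storage) PVCs of source and target, creating a target PVC when none
// exists yet and none is shared with the source.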
func (c *Controller) handleBackendStorage(migration *virtv1.VirtualMachineInstanceMigration, vmi *virtv1.VirtualMachineInstance) error {
if !backendstorage.IsBackendStorageNeeded(vmi) {
return nil
}
if migration.Status.MigrationState == nil {
migration.Status.MigrationState = &virtv1.VirtualMachineInstanceMigrationState{}
}
if !vmi.IsDecentralizedMigration() || vmi.IsMigrationSource() {
migration.Status.MigrationState.SourcePersistentStatePVCName = backendstorage.CurrentPVCName(vmi)
if migration.Status.MigrationState.SourcePersistentStatePVCName == "" {
return fmt.Errorf("no backend-storage PVC found in VMI volume status")
}
}
pvc := backendstorage.PVCForMigrationTarget(c.pvcStore, migration)
if pvc != nil {
migration.Status.MigrationState.TargetPersistentStatePVCName = pvc.Name
}
if migration.Status.MigrationState.TargetPersistentStatePVCName != "" {
// backend storage pvc has already been created or has ReadWriteMany access-mode
return nil
}
bs := backendstorage.NewBackendStorage(c.clientset, c.clusterConfig, c.storageClassStore, c.storageProfileStore, c.pvcStore)
key := controller.MigrationKey(migration)
c.pvcExpectations.ExpectCreations(key, 1)
backendStoragePVC, err := bs.CreatePVCForMigrationTarget(vmi, migration.Name)
if err != nil {
c.pvcExpectations.CreationObserved(key)
return err
}
migration.Status.MigrationState.TargetPersistentStatePVCName = backendStoragePVC.Name
if migration.Status.MigrationState.SourcePersistentStatePVCName == migration.Status.MigrationState.TargetPersistentStatePVCName {
// The PVC is shared between source and target, satisfy the expectation since the creation will never happen
c.pvcExpectations.CreationObserved(key)
}
return nil
}
func (c *Controller) createAttachmentPod(migration *virtv1.VirtualMachineInstanceMigration, vmi *virtv1.VirtualMachineInstance, virtLauncherPod *k8sv1.Pod) error {
sourcePod, err := controller.CurrentVMIPod(vmi, c.podIndexer)
if err != nil {
return fmt.Errorf("failed to get current VMI pod: %v", err)
}
volumes := storagetypes.GetHotplugVolumes(vmi, sourcePod)
volumeNamesPVCMap, err := storagetypes.VirtVolumesToPVCMap(volumes, c.pvcStore, virtLauncherPod.Namespace)
if err != nil {
return fmt.Errorf("failed to get PVC map: %v", err)
}
// Reset the hotplug volume statuses to enforce mount
vmiCopy := vmi.DeepCopy()
vmiCopy.Status.VolumeStatus = []virtv1.VolumeStatus{}
attachmentPodTemplate, err := c.templateService.RenderHotplugAttachmentPodTemplate(volumes, virtLauncherPod, vmiCopy, volumeNamesPVCMap)
if err != nil {
return fmt.Errorf("failed to render attachment pod template: %v", err)
}
if attachmentPodTemplate.ObjectMeta.Labels == nil {
attachmentPodTemplate.ObjectMeta.Labels = make(map[string]string)
}
if attachmentPodTemplate.ObjectMeta.Annotations == nil {
attachmentPodTemplate.ObjectMeta.Annotations = make(map[string]string)
}
attachmentPodTemplate.ObjectMeta.Labels[virtv1.MigrationJobLabel] = string(migration.UID)
attachmentPodTemplate.ObjectMeta.Annotations[virtv1.MigrationJobNameAnnotation] = migration.Name
key := controller.MigrationKey(migration)
c.podExpectations.ExpectCreations(key, 1)
attachmentPod, err := c.clientset.CoreV1().Pods(vmi.GetNamespace()).Create(context.Background(), attachmentPodTemplate, v1.CreateOptions{})
if err != nil {
c.podExpectations.CreationObserved(key)
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, controller.FailedCreatePodReason, "Error creating attachment pod: %v", err)
return fmt.Errorf("failed to create attachment pod: %v", err)
}
c.recorder.Eventf(migration, k8sv1.EventTypeNormal, controller.SuccessfulCreatePodReason, "Created attachment pod %s", attachmentPod.Name)
return nil
}
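// isPodPendingUnschedulable reports whether a pending, non-deleted pod carries
// a PodScheduled=False condition with reason Unschedulable.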
func isPodPendingUnschedulable(pod *k8sv1.Pod) bool {
if pod.Status.Phase != k8sv1.PodPending || pod.DeletionTimestamp != nil {
return false
}
for _, condition := range pod.Status.Conditions {
if condition.Type == k8sv1.PodScheduled &&
condition.Status == k8sv1.ConditionFalse &&
condition.Reason == k8sv1.PodReasonUnschedulable {
return true
}
}
return false
}
func timeSinceCreationSeconds(objectMeta *metav1.ObjectMeta) int64 {
now := time.Now().UTC().Unix()
creationTime := objectMeta.CreationTimestamp.Time.UTC().Unix()
seconds := now - creationTime
if seconds < 0 {
seconds = 0
}
return seconds
}
func (c *Controller) deleteTimedOutTargetPod(migration *virtv1.VirtualMachineInstanceMigration, vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod, message string) error {
migrationKey, err := controller.KeyFunc(migration)
if err != nil {
return err
}
c.podExpectations.ExpectDeletions(migrationKey, []string{controller.PodKey(pod)})
err = c.clientset.CoreV1().Pods(vmi.Namespace).Delete(context.Background(), pod.Name, v1.DeleteOptions{})
if err != nil {
c.podExpectations.DeletionObserved(migrationKey, controller.PodKey(pod))
c.recorder.Eventf(migration, k8sv1.EventTypeWarning, controller.FailedDeletePodReason, "Error deleting migration target pod: %v", err)
return fmt.Errorf("failed to delete vmi migration target pod that reached the pending pod timeout period: %v", err)
}
log.Log.Object(vmi).Infof("Deleted pending migration target pod with uuid %s for migration %s with uuid %s with reason [%s]", string(pod.UID), migration.Name, string(migration.UID), message)
c.recorder.Event(migration, k8sv1.EventTypeNormal, controller.SuccessfulDeletePodReason, message)
return nil
}
func (c *Controller) getUnschedulablePendingTimeoutSeconds(migration *virtv1.VirtualMachineInstanceMigration) int64 {
timeout := c.unschedulablePendingTimeoutSeconds
customTimeoutStr, ok := migration.Annotations[virtv1.MigrationUnschedulablePodTimeoutSecondsAnnotation]
if !ok {
return timeout
}
newTimeout, err := strconv.Atoi(customTimeoutStr)
if err != nil {
log.Log.Object(migration).Reason(err).Errorf("Unable to parse unschedulable pending timeout value for migration")
return timeout
}
return int64(newTimeout)
}
func (c *Controller) getCatchAllPendingTimeoutSeconds(migration *virtv1.VirtualMachineInstanceMigration) int64 {
timeout := c.catchAllPendingTimeoutSeconds
customTimeoutStr, ok := migration.Annotations[virtv1.MigrationPendingPodTimeoutSecondsAnnotation]
if !ok {
return timeout
}
newTimeout, err := strconv.Atoi(customTimeoutStr)
if err != nil {
log.Log.Object(migration).Reason(err).Errorf("Unable to parse catch all pending timeout value for migration")
return timeout
}
return int64(newTimeout)
}
func (c *Controller) getUtilityVolumesTimeoutSeconds(migration *virtv1.VirtualMachineInstanceMigration) int64 {
migrationConfig := c.clusterConfig.GetMigrationConfiguration()
if migrationConfig == nil || migrationConfig.UtilityVolumesTimeout == nil {
return virtconfig.MigrationUtilityVolumesTimeoutSeconds
}
timeout := *migrationConfig.UtilityVolumesTimeout
if customTimeoutStr, ok := migration.Annotations[virtv1.MigrationUtilityVolumesTimeoutSecondsAnnotation]; ok {
if newTimeout, err := strconv.Atoi(customTimeoutStr); err == nil {
timeout = int64(newTimeout)
} else {
log.Log.Object(migration).Reason(err).Errorf("Unable to parse utility volumes timeout value for migration")
}
}
return timeout
}
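// handleUtilityVolumes blocks a pending migration while utility volumes are
// still attached to the VMI. It returns true when the caller should stop
// processing, either because the migration is waiting (and was re-enqueued) or
// because the wait timed out and the migration was failed.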
func (c *Controller) handleUtilityVolumes(migrationCopy *virtv1.VirtualMachineInstanceMigration, vmi *virtv1.VirtualMachineInstance) (bool, error) {
conditionManager := controller.NewVirtualMachineInstanceMigrationConditionManager()
if !controller.VMIHasUtilityVolumes(vmi) {
conditionManager.RemoveCondition(migrationCopy, virtv1.VirtualMachineInstanceMigrationBlockedByUtilityVolumes)
return false, nil
}
migrationKey, err := controller.KeyFunc(migrationCopy)
if err != nil {
return false, err
}
utilityVolumesTimeout := c.getUtilityVolumesTimeoutSeconds(migrationCopy)
secondsSpentWaiting := timeSinceCreationSeconds(&migrationCopy.ObjectMeta)
if secondsSpentWaiting >= utilityVolumesTimeout {
c.recorder.Eventf(
migrationCopy,
k8sv1.EventTypeWarning,
controller.FailedMigrationReason,
"Migration timeout waiting for utility volumes to detach from VMI [%s/%s]. Utility volumes still present after %d seconds.",
vmi.Namespace, vmi.Name, secondsSpentWaiting)
log.Log.Object(migrationCopy).Warningf("Migration timeout waiting for utility volumes to detach from VMI [%s/%s].", vmi.Namespace, vmi.Name)
migrationCopy.Status.Phase = virtv1.MigrationFailed
return true, nil
}
c.recorder.Eventf(
migrationCopy,
k8sv1.EventTypeWarning,
controller.UtilityVolumeMigrationPendingReason,
"Migration waiting for utility volumes to detach from VMI [%s/%s]. Will timeout in %d seconds.",
vmi.Namespace, vmi.Name, utilityVolumesTimeout-secondsSpentWaiting)
log.Log.Object(migrationCopy).V(3).Infof("Migration waiting for utility volumes to detach from VMI [%s/%s].", vmi.Namespace, vmi.Name)
if !conditionManager.HasCondition(migrationCopy, virtv1.VirtualMachineInstanceMigrationBlockedByUtilityVolumes) {
condition := virtv1.VirtualMachineInstanceMigrationCondition{
Type: virtv1.VirtualMachineInstanceMigrationBlockedByUtilityVolumes,
Status: k8sv1.ConditionTrue,
LastProbeTime: v1.Now(),
Reason: "UtilityVolumesPresent",
Message: fmt.Sprintf("Migration is waiting for utility volumes to detach. Will timeout in %d seconds.", utilityVolumesTimeout-secondsSpentWaiting),
}
migrationCopy.Status.Conditions = append(migrationCopy.Status.Conditions, condition)
}
delay := time.Second * time.Duration(utilityVolumesTimeout-secondsSpentWaiting)
c.Queue.AddWithOpts(priorityqueue.AddOpts{Priority: pointer.P(migrationsutil.QueuePriorityRunning), After: delay}, migrationKey)
return true, nil
}
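// handlePendingPodTimeout deletes a pending target pod once it exceeds the
// unschedulable or catch-all timeout; otherwise it re-enqueues the migration
// so the timeout is checked again later.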
func (c *Controller) handlePendingPodTimeout(migration *virtv1.VirtualMachineInstanceMigration, vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod) error {
if pod.Status.Phase != k8sv1.PodPending || pod.DeletionTimestamp != nil || pod.CreationTimestamp.IsZero() {
// only check if timeout has occurred if pod is pending and not already marked for deletion
return nil
}
migrationKey, err := controller.KeyFunc(migration)
if err != nil {
return err
}
unschedulableTimeout := c.getUnschedulablePendingTimeoutSeconds(migration)
catchAllTimeout := c.getCatchAllPendingTimeoutSeconds(migration)
secondsSpentPending := timeSinceCreationSeconds(&pod.ObjectMeta)
if isPodPendingUnschedulable(pod) {
c.alertIfHostModelIsUnschedulable(vmi, pod)
c.recorder.Eventf(
migration,
k8sv1.EventTypeWarning,
controller.MigrationTargetPodUnschedulable,
"Migration target pod for VMI [%s/%s] is currently unschedulable.", vmi.Namespace, vmi.Name)
log.Log.Object(migration).Warningf("Migration target pod for VMI [%s/%s] is currently unschedulable.", vmi.Namespace, vmi.Name)
if secondsSpentPending >= unschedulableTimeout {
return c.deleteTimedOutTargetPod(migration, vmi, pod, fmt.Sprintf("unschedulable pod %s/%s timeout period exceeded", pod.Namespace, pod.Name))
} else {
// Make sure we check this again after some time
delay := time.Second * time.Duration(unschedulableTimeout-secondsSpentPending)
c.Queue.AddWithOpts(priorityqueue.AddOpts{Priority: pointer.P(migrationsutil.QueuePriorityRunning), After: delay}, migrationKey)
}
}
if secondsSpentPending >= catchAllTimeout {
return c.deleteTimedOutTargetPod(migration, vmi, pod, fmt.Sprintf("pending pod %s/%s timeout period exceeded", pod.Namespace, pod.Name))
} else {
// Make sure we check this again after some time
delay := time.Second * time.Duration(catchAllTimeout-secondsSpentPending)
c.Queue.AddWithOpts(priorityqueue.AddOpts{Priority: pointer.P(migrationsutil.QueuePriorityRunning), After: delay}, migrationKey)
}
return nil
}
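// sync performs the side effects for the migration's current phase: creating
// target and attachment pods, handling cancellation and backoff, handing the
// target pod off to virt-handler, and signaling aborts on the VMI.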
func (c *Controller) sync(key string, migration *virtv1.VirtualMachineInstanceMigration, vmi *virtv1.VirtualMachineInstance, pods []*k8sv1.Pod) error {
var pod *k8sv1.Pod = nil
targetPodExists := len(pods) > 0
if targetPodExists {
pod = pods[0]
}
if vmiDeleted := vmi == nil || vmi.DeletionTimestamp != nil; vmiDeleted {
return nil
}
if migrationFinalizedOnVMI := vmi.IsMigrationSynchronized(migration) && vmi.Status.MigrationState.MigrationUID == migration.UID &&
vmi.Status.MigrationState.Completed; migrationFinalizedOnVMI {
return nil
}
canMigrate, err := c.canMigrateVMI(migration, vmi)
if err != nil {
return err
}
if !canMigrate {
return fmt.Errorf("vmi is ineligible for migration because another migration job is running")
}
switch migration.Status.Phase {
case virtv1.MigrationPending:
if migration.DeletionTimestamp != nil {
return c.handlePreHandoffMigrationCancel(migration, vmi, pod)
}
if err = c.handleMigrationBackoff(key, vmi, migration); errors.Is(err, migrationBackoffError) {
warningMsg := fmt.Sprintf("backoff migrating vmi %s/%s", vmi.Namespace, vmi.Name)
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, err.Error(), warningMsg)
return nil
}
if controller.VMIHasUtilityVolumes(vmi) {
return nil
}
if !migration.IsLocalOrDecentralizedTarget() {
return nil
}
if !targetPodExists {
var sourcePod *k8sv1.Pod
var err error
if !migration.IsDecentralized() {
log.Log.Object(vmi).V(5).Info("regular migration creating target pod in same namespace as source")
sourcePod, err = controller.CurrentVMIPod(vmi, c.podIndexer)
if err != nil {
log.Log.Reason(err).Error("Failed to fetch pods for namespace from cache.")
return err
}
if !controller.PodExists(sourcePod) {
// Sudden deletes, for instance, can cause this. In that
// case there is nothing left to do in the creation flow;
// once the VMI is in a final state or deleted, the migration
// will be marked as failed as well.
return nil
}
} else {
log.Log.Object(vmi).V(5).Info("decentralized migration creating target pod in vmi namespace, source pod based on target VMI")
vmiCopy := vmi.DeepCopy()
if tpm.HasPersistentDevice(&vmiCopy.Spec) {
// This is a decentralized target, generate the source pod template, we don't care about
// the backend-storage PVC here because it will be created in the target namespace/cluster.
// this is purely a fake source pod template.
vmiCopy.Spec.Domain.Devices.TPM.Enabled = pointer.P(false)
}
if backendstorage.HasPersistentEFI(&vmiCopy.Spec) {
// This is a decentralized target, generate the source pod template, we don't care about
// the backend-storage PVC here because it will be created in the target namespace/cluster.
// this is purely a fake source pod template.
vmiCopy.Spec.Domain.Firmware.Bootloader.EFI.Persistent = pointer.P(false)
}
sourcePod, err = c.templateService.RenderLaunchManifest(vmiCopy)
if err != nil {
return fmt.Errorf("failed to render fake source pod launch manifest: %v", err)
}
}
// patch VMI annotations and set RuntimeUser in preparation for target pod creation
patches := c.setupVMIRuntimeUser(vmi)
if !patches.IsEmpty() {
patchBytes, err := patches.GeneratePayload()
if err != nil {
return err
}
vmi, err = c.clientset.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType, patchBytes, v1.PatchOptions{})
if err != nil {
return fmt.Errorf("failed to set VMI RuntimeUser: %v", err)
}
}
return c.handleTargetPodCreation(key, migration, vmi, sourcePod)
} else if controller.IsPodReady(pod) {
if controller.VMIHasHotplugVolumes(vmi) {
attachmentPods, err := controller.AttachmentPods(pod, c.podIndexer)
if err != nil {
return fmt.Errorf(failedGetAttractionPodsFmt, err)
}
if len(attachmentPods) == 0 {
log.Log.Object(migration).V(5).Infof("Creating attachment pod for vmi %s/%s on node %s", vmi.Namespace, vmi.Name, pod.Spec.NodeName)
return c.createAttachmentPod(migration, vmi, pod)
}
}
} else {
return c.handlePendingPodTimeout(migration, vmi, pod)
}
case virtv1.MigrationScheduling:
if migration.DeletionTimestamp != nil {
return c.handlePreHandoffMigrationCancel(migration, vmi, pod)
}
if migration.IsLocalOrDecentralizedSource() && vmi.IsRunning() {
if err := c.updateVMIMigrationSourceWithPodInfo(migration, vmi); err != nil {
return err
}
}
if targetPodExists {
return c.handlePendingPodTimeout(migration, vmi, pod)
}
case virtv1.MigrationScheduled:
if migration.DeletionTimestamp != nil && !c.isMigrationHandedOff(migration, vmi) {
return c.handlePreHandoffMigrationCancel(migration, vmi, pod)
}
// once target pod is running, then alert the VMI of the migration by
// setting the target and source nodes. This kicks off the preparation stage.
if targetPodExists && controller.IsPodReady(pod) {
if err := c.updateTargetPodNetworkInfo(vmi, pod); err != nil {
return err
}
return c.handleTargetPodHandoff(migration, vmi, pod)
}
case virtv1.MigrationPreparingTarget, virtv1.MigrationTargetReady, virtv1.MigrationFailed:
if migration.IsLocalOrDecentralizedTarget() && (!targetPodExists || controller.PodIsDown(pod)) &&
vmi.IsMigrationSynchronized(migration) &&
len(vmi.Status.MigrationState.TargetDirectMigrationNodePorts) == 0 &&
vmi.Status.MigrationState.StartTimestamp == nil &&
!vmi.Status.MigrationState.Failed &&
!vmi.Status.MigrationState.Completed {
err = c.handleMarkMigrationFailedOnVMI(migration, vmi)
if err != nil {
return err
}
}
return nil
case virtv1.MigrationRunning:
if migration.DeletionTimestamp != nil && vmi.IsMigrationSynchronized(migration) {
err = c.markMigrationAbortInVmiStatus(migration, vmi)
if err != nil {
return err
}
}
case virtv1.MigrationWaitingForSync:
// Waiting for sync; set up the VMI migration target status
origVMI := vmi.DeepCopy()
c.initializeMigrateTargetState(migration, vmi)
return c.patchVMI(origVMI, vmi)
case virtv1.MigrationSynchronizing:
origVMI := vmi.DeepCopy()
c.initializeMigrateSourceState(migration, vmi)
return c.patchVMI(origVMI, vmi)
}
return nil
}
func (c *Controller) setupVMIRuntimeUser(vmi *virtv1.VirtualMachineInstance) *patch.PatchSet {
patchSet := patch.New()
if !c.clusterConfig.RootEnabled() {
// The cluster is configured for non-root VMs, ensure the VMI is non-root.
// If the VMI is root, the migration will be a root -> non-root migration.
if vmi.Status.RuntimeUser != util.NonRootUID {
patchSet.AddOption(patch.WithReplace("/status/runtimeUser", util.NonRootUID))
}
// This is required in order to be able to update from v0.43-v0.51 to v0.52+
if vmi.Annotations == nil {
patchSet.AddOption(patch.WithAdd("/metadata/annotations", map[string]string{virtv1.DeprecatedNonRootVMIAnnotation: "true"}))
} else if _, ok := vmi.Annotations[virtv1.DeprecatedNonRootVMIAnnotation]; !ok {
patchSet.AddOption(patch.WithAdd(fmt.Sprintf("/metadata/annotations/%s", patch.EscapeJSONPointer(virtv1.DeprecatedNonRootVMIAnnotation)), "true"))
}
} else {
// The cluster is configured for root VMs, ensure the VMI is root.
// If the VMI is non-root, the migration will be a non-root -> root migration.
if vmi.Status.RuntimeUser != util.RootUser {
patchSet.AddOption(patch.WithReplace("/status/runtimeUser", util.RootUser))
}
if _, ok := vmi.Annotations[virtv1.DeprecatedNonRootVMIAnnotation]; ok {
patchSet.AddOption(patch.WithRemove(fmt.Sprintf("/metadata/annotations/%s", patch.EscapeJSONPointer(virtv1.DeprecatedNonRootVMIAnnotation))))
}
}
return patchSet
}
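// Illustrative sketch (not part of the controller): on a cluster configured
// for non-root VMs, patching a root VMI with the set built above serializes
// to a JSON patch roughly like
//
//	[
//	  {"op": "replace", "path": "/status/runtimeUser", "value": 107},
//	  {"op": "add", "path": "/metadata/annotations/kubevirt.io~1nonroot", "value": "true"}
//	]
//
// The UID and annotation key shown are assumptions based on util.NonRootUID
// and virtv1.DeprecatedNonRootVMIAnnotation; "~1" is the RFC 6901 escape for
// "/" produced by patch.EscapeJSONPointer.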
func (c *Controller) listMatchingTargetPods(migration *virtv1.VirtualMachineInstanceMigration, vmi *virtv1.VirtualMachineInstance) ([]*k8sv1.Pod, error) {
objs, err := c.podIndexer.ByIndex(vmiPodIndex, string(vmi.UID))
if err != nil {
return nil, err
}
var pods []*k8sv1.Pod
for _, obj := range objs {
pod := obj.(*k8sv1.Pod)
if value, ok := pod.Labels[virtv1.MigrationJobLabel]; ok && value == string(migration.UID) {
if value, ok := pod.Labels[virtv1.AppLabel]; ok && value == "virt-launcher" {
pods = append(pods, pod)
}
}
}
return pods, nil
}
func (c *Controller) addMigration(obj interface{}) {
c.enqueueMigration(obj)
}
func (c *Controller) deleteMigration(obj interface{}) {
c.enqueueMigration(obj)
}
func (c *Controller) updateMigration(_, curr interface{}) {
c.enqueueMigration(curr)
}
func (c *Controller) enqueueMigration(obj interface{}) {
logger := log.Log
migration := obj.(*virtv1.VirtualMachineInstanceMigration)
key, err := controller.KeyFunc(migration)
if err != nil {
logger.Object(migration).Reason(err).Error("Failed to extract key from migration.")
return
}
// If the migration is running, it will default to the active priority.
if migration.Status.Phase == virtv1.MigrationRunning {
c.Queue.AddWithOpts(priorityqueue.AddOpts{Priority: pointer.P(migrationsutil.QueuePriorityRunning)}, key)
} else {
if c.clusterConfig.MigrationPriorityQueueEnabled() {
c.Queue.AddWithOpts(priorityqueue.AddOpts{Priority: migrationsutil.PriorityFromMigration(migration)}, key)
} else {
// If the key is already in the queue at active priority or higher, it will keep that priority.
// If the key is already in the queue at pending priority, it will be bumped to 0 (still below all active ones).
c.Queue.Add(key)
}
}
}
func (c *Controller) getControllerOf(pod *k8sv1.Pod) *v1.OwnerReference {
t := true
return &v1.OwnerReference{
Kind: virtv1.VirtualMachineInstanceMigrationGroupVersionKind.Kind,
Name: pod.Annotations[virtv1.MigrationJobNameAnnotation],
UID: types.UID(pod.Labels[virtv1.MigrationJobLabel]),
Controller: &t,
BlockOwnerDeletion: &t,
}
}
// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (c *Controller) resolveControllerRef(namespace string, controllerRef *v1.OwnerReference) *virtv1.VirtualMachineInstanceMigration {
// We can't look up by UID, so look up by Name and then verify UID.
// Don't even try to look up by Name if it's the wrong Kind.
if controllerRef.Kind != virtv1.VirtualMachineInstanceMigrationGroupVersionKind.Kind {
return nil
}
migration, exists, err := c.migrationIndexer.GetByKey(controller.NamespacedKey(namespace, controllerRef.Name))
if err != nil {
return nil
}
if !exists {
return nil
}
if migration.(*virtv1.VirtualMachineInstanceMigration).UID != controllerRef.UID {
// The controller we found with this Name is not the same one that the
// ControllerRef points to.
return nil
}
return migration.(*virtv1.VirtualMachineInstanceMigration)
}
// When a pod is created, enqueue the migration that manages it and update its podExpectations.
func (c *Controller) addPod(obj interface{}) {
pod := obj.(*k8sv1.Pod)
if pod.DeletionTimestamp != nil {
// on a restart of the controller manager, it's possible a new pod shows up in a state that
// is already pending deletion. Prevent the pod from being a creation observation.
c.deletePod(pod)
return
}
controllerRef := c.getControllerOf(pod)
migration := c.resolveControllerRef(pod.Namespace, controllerRef)
if migration == nil {
return
}
migrationKey, err := controller.KeyFunc(migration)
if err != nil {
return
}
log.Log.V(4).Object(pod).Infof("Pod created for key %s", migrationKey)
c.podExpectations.CreationObserved(migrationKey)
c.Queue.AddWithOpts(priorityqueue.AddOpts{Priority: pointer.P(migrationsutil.QueuePriorityRunning)}, migrationKey)
}
// When a pod is updated, figure out what migration manages it and wake them
// up. If the labels of the pod have changed we need to awaken both the old
// and new migration. old and cur must be *k8sv1.Pod types.
func (c *Controller) updatePod(old, cur interface{}) {
curPod := cur.(*k8sv1.Pod)
oldPod := old.(*k8sv1.Pod)
if curPod.ResourceVersion == oldPod.ResourceVersion {
// Periodic resync will send update events for all known pods.
// Two different versions of the same pod will always have different RVs.
return
}
labelChanged := !equality.Semantic.DeepEqual(curPod.Labels, oldPod.Labels)
if curPod.DeletionTimestamp != nil {
// having a pod marked for deletion is enough to count as a deletion expectation
c.deletePod(curPod)
if labelChanged {
// we don't need to check the oldPod.DeletionTimestamp because DeletionTimestamp cannot be unset.
c.deletePod(oldPod)
}
return
}
curControllerRef := c.getControllerOf(curPod)
oldControllerRef := c.getControllerOf(oldPod)
controllerRefChanged := !equality.Semantic.DeepEqual(curControllerRef, oldControllerRef)
if controllerRefChanged && oldControllerRef != nil {
// The ControllerRef was changed. Sync the old controller, if any.
if migration := c.resolveControllerRef(oldPod.Namespace, oldControllerRef); migration != nil {
c.enqueueMigration(migration)
}
}
migration := c.resolveControllerRef(curPod.Namespace, curControllerRef)
if migration == nil {
return
}
log.Log.V(4).Object(curPod).Infof("Pod updated")
migrationKey, err := controller.KeyFunc(migration)
if err != nil {
return
}
c.Queue.AddWithOpts(priorityqueue.AddOpts{Priority: pointer.P(migrationsutil.QueuePriorityRunning)}, migrationKey)
}
// When a resourceQuota is updated, figure out if there are pending migrations in the namespace.
// If there are, push them into the queue to accelerate the target creation process.
func (c *Controller) updateResourceQuota(_, cur interface{}) {
curResourceQuota := cur.(*k8sv1.ResourceQuota)
log.Log.V(4).Object(curResourceQuota).Infof("ResourceQuota updated")
objs, _ := c.migrationIndexer.ByIndex(cache.NamespaceIndex, curResourceQuota.Namespace)
for _, obj := range objs {
migration := obj.(*virtv1.VirtualMachineInstanceMigration)
if migration.Status.Conditions == nil {
continue
}
for _, cond := range migration.Status.Conditions {
if cond.Type == virtv1.VirtualMachineInstanceMigrationRejectedByResourceQuota {
c.enqueueMigration(migration)
}
}
}
}
// When a resourceQuota is deleted, figure out if there are pending migrations in the namespace.
// If there are, push them into the queue to accelerate the target creation process.
func (c *Controller) deleteResourceQuota(obj interface{}) {
resourceQuota := obj.(*k8sv1.ResourceQuota)
log.Log.V(4).Object(resourceQuota).Infof("ResourceQuota deleted")
objs, _ := c.migrationIndexer.ByIndex(cache.NamespaceIndex, resourceQuota.Namespace)
for _, obj := range objs {
migration := obj.(*virtv1.VirtualMachineInstanceMigration)
if migration.Status.Conditions == nil {
continue
}
for _, cond := range migration.Status.Conditions {
if cond.Type == virtv1.VirtualMachineInstanceMigrationRejectedByResourceQuota {
c.enqueueMigration(migration)
}
}
}
}
func (c *Controller) updateKubeVirt(old, cur interface{}) {
curKubevirt := cur.(*virtv1.KubeVirt)
oldKubevirt := old.(*virtv1.KubeVirt)
if !slices.Equal(curKubevirt.Status.SynchronizationAddresses, oldKubevirt.Status.SynchronizationAddresses) {
// sync address was updated, update all active migrations
for _, obj := range c.migrationIndexer.List() {
migration, ok := obj.(*virtv1.VirtualMachineInstanceMigration)
if !ok {
log.Log.Errorf("found unknown object in migration store %v", obj)
continue
}
if !migration.IsFinal() {
c.enqueueMigration(migration)
}
}
}
}
// When a pod is deleted, enqueue the migration that manages the pod and update its podExpectations.
// obj could be an *v1.Pod, or a DeletionFinalStateUnknown marker item.
func (c *Controller) deletePod(obj interface{}) {
pod, ok := obj.(*k8sv1.Pod)
// When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the pod
// changed labels the new migration will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Log.Reason(fmt.Errorf("couldn't get object from tombstone %+v", obj)).Error(failedToProcessDeleteNotificationErrMsg)
return
}
pod, ok = tombstone.Obj.(*k8sv1.Pod)
if !ok {
log.Log.Reason(fmt.Errorf("tombstone contained object that is not a pod %#v", obj)).Error(failedToProcessDeleteNotificationErrMsg)
return
}
}
controllerRef := c.getControllerOf(pod)
migration := c.resolveControllerRef(pod.Namespace, controllerRef)
if migration == nil {
return
}
migrationKey, err := controller.KeyFunc(migration)
if err != nil {
return
}
c.podExpectations.DeletionObserved(migrationKey, controller.PodKey(pod))
c.enqueueMigration(migration)
}
func (c *Controller) addPVC(obj interface{}) {
pvc := obj.(*k8sv1.PersistentVolumeClaim)
if pvc.DeletionTimestamp != nil {
return
}
if !strings.Contains(pvc.Name, backendstorage.PVCPrefix) {
return
}
migrationName, exists := pvc.Labels[virtv1.MigrationNameLabel]
if !exists {
return
}
migrationKey := controller.NamespacedKey(pvc.Namespace, migrationName)
c.pvcExpectations.CreationObserved(migrationKey)
c.Queue.AddWithOpts(priorityqueue.AddOpts{Priority: pointer.P(migrationsutil.QueuePriorityRunning)}, migrationKey)
}
type vmimCollection []*virtv1.VirtualMachineInstanceMigration
func (c vmimCollection) Len() int {
return len(c)
}
func (c vmimCollection) Less(i, j int) bool {
t1 := &c[i].CreationTimestamp
t2 := &c[j].CreationTimestamp
return t1.Before(t2)
}
func (c vmimCollection) Swap(i, j int) {
c[i], c[j] = c[j], c[i]
}
func (c *Controller) garbageCollectFinalizedMigrations(vmi *virtv1.VirtualMachineInstance) error {
var finalizedMigrations []string
migrations, err := c.listMigrationsMatchingVMI(vmi.Namespace, vmi.Name)
if err != nil {
return err
}
// Oldest first
sort.Sort(vmimCollection(migrations))
for _, migration := range migrations {
if migration.IsFinal() && migration.DeletionTimestamp == nil {
finalizedMigrations = append(finalizedMigrations, migration.Name)
}
}
// keep only the most recent finalized migration objects, bounded by the garbage collection buffer
garbageCollectionCount := len(finalizedMigrations) - defaultFinalizedMigrationGarbageCollectionBuffer
if garbageCollectionCount <= 0 {
return nil
}
for i := range garbageCollectionCount {
err = c.clientset.VirtualMachineInstanceMigration(vmi.Namespace).Delete(context.Background(), finalizedMigrations[i], v1.DeleteOptions{})
if err != nil && k8serrors.IsNotFound(err) {
// This is safe to ignore. It's possible in some
// scenarios that the migration we're trying to garbage
// collect has already disappeared. Let's log it as debug
// and suppress the error in this situation.
log.Log.Reason(err).Infof("error encountered when garbage collecting migration object %s/%s", vmi.Namespace, finalizedMigrations[i])
} else if err != nil {
return err
}
}
return nil
}
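// Worked example: assuming defaultFinalizedMigrationGarbageCollectionBuffer
// is 5, a VMI with 8 finalized migrations (sorted oldest first) yields a
// garbageCollectionCount of 3, so the 3 oldest finalized migration objects
// are deleted and the 5 most recent are kept.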
// listMigrationsMatchingVMI takes a namespace and a VMI name and returns all migrations targeting this vmi
func (c *Controller) listMigrationsMatchingVMI(namespace, name string) ([]*virtv1.VirtualMachineInstanceMigration, error) {
objs, err := c.migrationIndexer.ByIndex(controller.ByVMINameIndex, fmt.Sprintf("%s/%s", namespace, name))
if err != nil {
return nil, err
}
var migrations []*virtv1.VirtualMachineInstanceMigration
for _, obj := range objs {
migrations = append(migrations, obj.(*virtv1.VirtualMachineInstanceMigration))
}
return migrations, nil
}
func (c *Controller) listBackoffEligibleMigrations(namespace string, name string) ([]*virtv1.VirtualMachineInstanceMigration, error) {
var eligibleMigrations []*virtv1.VirtualMachineInstanceMigration
migrations, err := c.listMigrationsMatchingVMI(namespace, name)
if err != nil {
return eligibleMigrations, err
}
for _, m := range migrations {
_, isEvacuation := m.Annotations[virtv1.EvacuationMigrationAnnotation]
_, isWorkload := m.Annotations[virtv1.WorkloadUpdateMigrationAnnotation]
if isEvacuation || isWorkload {
eligibleMigrations = append(eligibleMigrations, m)
}
}
return eligibleMigrations, nil
}
func (c *Controller) addVMI(obj interface{}) {
vmi := obj.(*virtv1.VirtualMachineInstance)
if vmi.DeletionTimestamp != nil {
c.deleteVMI(vmi)
return
}
migrations, err := c.listMigrationsMatchingVMI(vmi.Namespace, vmi.Name)
if err != nil {
return
}
for _, migration := range migrations {
c.enqueueMigration(migration)
}
}
func (c *Controller) updateVMI(old, cur interface{}) {
curVMI := cur.(*virtv1.VirtualMachineInstance)
oldVMI := old.(*virtv1.VirtualMachineInstance)
if curVMI.ResourceVersion == oldVMI.ResourceVersion {
// Periodic resync will send update events for all known VMIs.
// Two different versions of the same vmi will always
// have different RVs.
return
}
labelChanged := !equality.Semantic.DeepEqual(curVMI.Labels, oldVMI.Labels)
if curVMI.DeletionTimestamp != nil {
// having a VMI marked for deletion is enough
// to trigger the delete handling
c.deleteVMI(curVMI)
if labelChanged {
// we don't need to check the oldVMI.DeletionTimestamp
// because DeletionTimestamp cannot be unset.
c.deleteVMI(oldVMI)
}
return
}
migrations, err := c.listMigrationsMatchingVMI(curVMI.Namespace, curVMI.Name)
if err != nil {
log.Log.Object(curVMI).Errorf("Error encountered during vmi update: %v", err)
return
}
for _, migration := range migrations {
log.Log.V(4).Object(curVMI).Infof("vmi updated for migration %s", migration.Name)
c.enqueueMigration(migration)
}
}
func (c *Controller) deleteVMI(obj interface{}) {
vmi, ok := obj.(*virtv1.VirtualMachineInstance)
// When a delete is dropped, the relist will notice a vmi in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the vmi
// changed labels the new migration will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Log.Reason(fmt.Errorf("couldn't get object from tombstone %+v", obj)).Error(failedToProcessDeleteNotificationErrMsg)
return
}
vmi, ok = tombstone.Obj.(*virtv1.VirtualMachineInstance)
if !ok {
log.Log.Reason(fmt.Errorf("tombstone contained object that is not a vmi %#v", obj)).Error(failedToProcessDeleteNotificationErrMsg)
return
}
}
migrations, err := c.listMigrationsMatchingVMI(vmi.Namespace, vmi.Name)
if err != nil {
return
}
for _, migration := range migrations {
log.Log.V(4).Object(vmi).Infof("vmi deleted for migration %s", migration.Name)
c.enqueueMigration(migration)
}
}
func (c *Controller) outboundMigrationsOnNode(node string, runningMigrations []*virtv1.VirtualMachineInstanceMigration) int {
sum := 0
for _, migration := range runningMigrations {
key := controller.NamespacedKey(migration.Namespace, migration.Spec.VMIName)
if obj, exists, _ := c.vmiStore.GetByKey(key); exists {
vmi := obj.(*virtv1.VirtualMachineInstance)
if vmi.Status.NodeName == node || (vmi.Status.MigrationState != nil && vmi.Status.MigrationState.SourceNode == node) {
sum++
}
}
}
return sum
}
// findRunningMigrations returns all migrations that are running or about to run:
// migrations already in the Running phase, plus migrations that are still pending
// but for which a target pod has already been created.
func (c *Controller) findRunningMigrations() ([]*virtv1.VirtualMachineInstanceMigration, error) {
// Treat pending migrations that already have a target pod as running, so we don't
// start new migrations while waiting for their migration object updates.
notFinishedMigrations := migrationsutil.ListUnfinishedMigrations(c.migrationIndexer)
var runningMigrations []*virtv1.VirtualMachineInstanceMigration
for _, migration := range notFinishedMigrations {
if migration.IsRunning() {
runningMigrations = append(runningMigrations, migration)
continue
}
key := controller.NamespacedKey(migration.Namespace, migration.Spec.VMIName)
vmi, exists, err := c.vmiStore.GetByKey(key)
if err != nil {
return nil, err
}
if !exists {
continue
}
pods, err := c.listMatchingTargetPods(migration, vmi.(*virtv1.VirtualMachineInstance))
if err != nil {
return nil, err
}
if len(pods) > 0 {
runningMigrations = append(runningMigrations, migration)
}
}
return runningMigrations, nil
}
func (c *Controller) getNodeForVMI(vmi *virtv1.VirtualMachineInstance) (*k8sv1.Node, error) {
obj, exists, err := c.nodeStore.GetByKey(vmi.Status.NodeName)
if err != nil {
return nil, fmt.Errorf("cannot get nodes to migrate VMI with host-model CPU. error: %v", err)
} else if !exists {
return nil, fmt.Errorf("node \"%s\" associated with vmi \"%s\" does not exist", vmi.Status.NodeName, vmi.Name)
}
node := obj.(*k8sv1.Node)
return node, nil
}
func (c *Controller) alertIfHostModelIsUnschedulable(vmi *virtv1.VirtualMachineInstance, targetPod *k8sv1.Pod) {
fittingNodeFound := false
if cpu := vmi.Spec.Domain.CPU; cpu == nil || cpu.Model != virtv1.CPUModeHostModel {
return
}
requiredNodeLabels := map[string]string{}
for key, value := range targetPod.Spec.NodeSelector {
if strings.HasPrefix(key, virtv1.SupportedHostModelMigrationCPU) || strings.HasPrefix(key, virtv1.CPUFeatureLabel) {
requiredNodeLabels[key] = value
}
}
nodes := c.nodeStore.List()
for _, nodeInterface := range nodes {
node := nodeInterface.(*k8sv1.Node)
if node.Name == vmi.Status.NodeName {
continue // avoid checking the VMI's source node
}
if isNodeSuitableForHostModelMigration(node, requiredNodeLabels) {
log.Log.Object(vmi).Infof("Node %s is suitable to run vmi %s host model cpu mode (more nodes may fit as well)", node.Name, vmi.Name)
fittingNodeFound = true
break
}
}
if !fittingNodeFound {
warningMsg := fmt.Sprintf("Migration cannot proceed since no node is suitable to run the required CPU model / required features: %v", requiredNodeLabels)
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, controller.NoSuitableNodesForHostModelMigration, warningMsg)
log.Log.Object(vmi).Warning(warningMsg)
}
}
func getNodeSelectorsFromVMIMigrationSourceState(sourceState *virtv1.VirtualMachineInstanceMigrationSourceState) (map[string]string, error) {
result, nodeSelectorKeyForHostModel, err := getHostCpuModelFromMap(sourceState.NodeSelectors)
if err != nil {
return nil, err
}
log.Log.V(3).Infof("cpu model label selector (\"%s\") defined for migration target pod", nodeSelectorKeyForHostModel)
return result, nil
}
func prepareNodeSelectorForHostCpuModel(node *k8sv1.Node, pod *k8sv1.Pod, sourcePodNodeSelector map[string]string) (map[string]string, error) {
result := make(map[string]string)
migratedAtLeastOnce := false
// if the vmi has already migrated before, its node selector should carry over the labels that track the CPU model and required features
for key, value := range sourcePodNodeSelector {
if strings.Contains(key, virtv1.CPUFeatureLabel) || strings.Contains(key, virtv1.SupportedHostModelMigrationCPU) {
result[key] = value
migratedAtLeastOnce = true
}
}
if !migratedAtLeastOnce {
// only copy node label keys when the VM has not migrated before. Otherwise, if we migrate again,
// we could add labels we don't want, which could prevent migrating back to the original node.
hostCpuModelMap, nodeSelectorKeyForHostModel, err := getHostCpuModelFromMap(node.Labels)
if err != nil {
return nil, err
}
maps.Copy(result, hostCpuModelMap)
log.Log.Object(pod).V(5).Infof("cpu model label selector (\"%s\") defined for migration target pod", nodeSelectorKeyForHostModel)
}
return result, nil
}
func getHostCpuModelFromMap(selectorMap map[string]string) (map[string]string, string, error) {
result := make(map[string]string)
var hostCpuModel, nodeSelectorKeyForHostModel, hostModelLabelValue string
for key, value := range selectorMap {
if strings.HasPrefix(key, virtv1.HostModelCPULabel) {
hostCpuModel = strings.TrimPrefix(key, virtv1.HostModelCPULabel)
hostModelLabelValue = value
}
if strings.HasPrefix(key, virtv1.HostModelRequiredFeaturesLabel) {
requiredFeature := strings.TrimPrefix(key, virtv1.HostModelRequiredFeaturesLabel)
result[virtv1.CPUFeatureLabel+requiredFeature] = value
}
}
if hostCpuModel == "" {
return nil, "", fmt.Errorf("unable to locate host cpu model, does not contain label \"%s\" with information", virtv1.HostModelCPULabel)
}
nodeSelectorKeyForHostModel = virtv1.SupportedHostModelMigrationCPU + hostCpuModel
result[nodeSelectorKeyForHostModel] = hostModelLabelValue
return result, nodeSelectorKeyForHostModel, nil
}
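// Illustrative sketch (label names are assumptions based on the upstream
// virtv1 constants referenced above): a source node labeled
//
//	host-model-cpu.node.kubevirt.io/Skylake-Client: "true"
//	host-model-required-features.node.kubevirt.io/vmx: "true"
//
// produces the target node selector
//
//	cpu-model-migration.node.kubevirt.io/Skylake-Client: "true"
//	cpu-feature.node.kubevirt.io/vmx: "true"
//
// i.e. the host model key is rewritten to its "supported for migration"
// counterpart and each required feature to its cpu-feature key.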
func isNodeSuitableForHostModelMigration(node *k8sv1.Node, requiredNodeLabels map[string]string) bool {
for key, value := range requiredNodeLabels {
nodeValue, ok := node.Labels[key]
if !ok || nodeValue != value {
return false
}
}
return true
}
func getCPUVendorLabelKey(labels map[string]string) string {
for key := range labels {
if strings.HasPrefix(key, virtv1.CPUModelVendorLabel) {
return key
}
}
return ""
}
func (c *Controller) matchMigrationPolicy(vmi *virtv1.VirtualMachineInstance, clusterMigrationConfiguration *virtv1.MigrationConfiguration) error {
vmiNamespace, err := c.clientset.CoreV1().Namespaces().Get(context.Background(), vmi.Namespace, v1.GetOptions{})
if err != nil {
return err
}
// Fetch cluster policies
var policies []v1alpha1.MigrationPolicy
migrationInterfaceList := c.migrationPolicyStore.List()
for _, obj := range migrationInterfaceList {
policy := obj.(*v1alpha1.MigrationPolicy)
policies = append(policies, *policy)
}
policiesListObj := v1alpha1.MigrationPolicyList{Items: policies}
// Override cluster-wide migration configuration if migration policy is matched
matchedPolicy := matchPolicy(&policiesListObj, vmi, vmiNamespace)
if matchedPolicy == nil {
log.Log.Object(vmi).Infof("no migration policy matched for VMI %s", vmi.Name)
return nil
}
isUpdated, err := matchedPolicy.GetMigrationConfByPolicy(clusterMigrationConfiguration)
if err != nil {
return err
}
if isUpdated {
vmi.Status.MigrationState.MigrationPolicyName = &matchedPolicy.Name
vmi.Status.MigrationState.MigrationConfiguration = clusterMigrationConfiguration
log.Log.Object(vmi).Infof("migration is updated by migration policy named %s.", matchedPolicy.Name)
}
return nil
}
func (c *Controller) isMigrationPolicyMatched(vmi *virtv1.VirtualMachineInstance) bool {
if vmi == nil {
return false
}
migrationPolicyName := vmi.Status.MigrationState.MigrationPolicyName
return migrationPolicyName != nil && *migrationPolicyName != ""
}
func (c *Controller) isMigrationHandedOff(migration *virtv1.VirtualMachineInstanceMigration, vmi *virtv1.VirtualMachineInstance) bool {
if vmi.IsMigrationSynchronized(migration) && vmi.Status.MigrationState.MigrationUID == migration.UID {
return true
}
migrationKey := controller.MigrationKey(migration)
c.handOffLock.Lock()
defer c.handOffLock.Unlock()
_, isHandedOff := c.handOffMap[migrationKey]
return isHandedOff
}
func (c *Controller) addHandOffKey(migrationKey string) {
c.handOffLock.Lock()
defer c.handOffLock.Unlock()
c.handOffMap[migrationKey] = struct{}{}
}
func (c *Controller) removeHandOffKey(migrationKey string) {
c.handOffLock.Lock()
defer c.handOffLock.Unlock()
delete(c.handOffMap, migrationKey)
}
func getComputeContainer(pod *k8sv1.Pod) *k8sv1.Container {
for _, container := range pod.Spec.Containers {
if container.Name == "compute" {
return &container
}
}
return nil
}
func getTargetPodLimitsCount(pod *k8sv1.Pod) (int64, error) {
cc := getComputeContainer(pod)
if cc == nil {
return 0, fmt.Errorf("Could not find VMI compute container")
}
cpuLimit, ok := cc.Resources.Limits[k8sv1.ResourceCPU]
if !ok {
return 0, fmt.Errorf("Could not find dedicated CPU limit in VMI compute container")
}
return cpuLimit.Value(), nil
}
func getTargetPodMemoryRequests(pod *k8sv1.Pod) (string, error) {
cc := getComputeContainer(pod)
if cc == nil {
return "", fmt.Errorf("Could not find VMI compute container")
}
memReq, ok := cc.Resources.Requests[k8sv1.ResourceMemory]
if !ok {
return "", fmt.Errorf("Could not find memory request in VMI compute container")
}
if hugePagesReq, ok := cc.Resources.Requests[k8sv1.ResourceHugePagesPrefix+"2Mi"]; ok {
memReq.Add(hugePagesReq)
}
if hugePagesReq, ok := cc.Resources.Requests[k8sv1.ResourceHugePagesPrefix+"1Gi"]; ok {
memReq.Add(hugePagesReq)
}
return memReq.String(), nil
}
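// Worked example: a compute container requesting memory: 2Gi and
// hugepages-1Gi: 4Gi yields "6Gi" here, because hugepages requests are
// added on top of the plain memory request when sizing the migration target.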
package migration
import (
k8sv1 "k8s.io/api/core/v1"
k6tv1 "kubevirt.io/api/core/v1"
"kubevirt.io/api/migrations/v1alpha1"
)
type migrationPolicyMatchScore struct {
matchingVMILabels int
matchingNSLabels int
}
func (score migrationPolicyMatchScore) equals(otherScore migrationPolicyMatchScore) bool {
return score.matchingVMILabels == otherScore.matchingVMILabels &&
score.matchingNSLabels == otherScore.matchingNSLabels
}
func (score migrationPolicyMatchScore) greaterThan(otherScore migrationPolicyMatchScore) bool {
thisTotalScore := score.matchingNSLabels + score.matchingVMILabels
otherTotalScore := otherScore.matchingNSLabels + otherScore.matchingVMILabels
if thisTotalScore == otherTotalScore {
return score.matchingVMILabels > otherScore.matchingVMILabels
}
return thisTotalScore > otherTotalScore
}
func (score migrationPolicyMatchScore) lessThan(otherScore migrationPolicyMatchScore) bool {
return !score.equals(otherScore) && !score.greaterThan(otherScore)
}
// matchPolicy returns the policy that matches the vmi, or nil if no policy is matched.
//
// Since every policy can specify VMI and Namespace labels to match to, matching is done by returning the most
// detailed policy, meaning the policy that matches the VMI and specifies the most labels that matched either
// the VMI or its namespace labels.
//
// If two policies match with the same level of detail (i.e. the same number of matching labels),
// the matched policy is chosen by lexicographic order of the policies' names. This is an arbitrary
// yet deterministic way of breaking ties.
func matchPolicy(policyList *v1alpha1.MigrationPolicyList, vmi *k6tv1.VirtualMachineInstance, vmiNamespace *k8sv1.Namespace) *v1alpha1.MigrationPolicy {
var matchingPolicies []v1alpha1.MigrationPolicy
bestScore := migrationPolicyMatchScore{}
for _, policy := range policyList.Items {
doesMatch, curScore := countMatchingLabels(&policy, vmi.Labels, vmiNamespace.Labels)
if !doesMatch || curScore.lessThan(bestScore) {
continue
} else if curScore.greaterThan(bestScore) {
bestScore = curScore
matchingPolicies = []v1alpha1.MigrationPolicy{policy}
} else {
matchingPolicies = append(matchingPolicies, policy)
}
}
if len(matchingPolicies) == 0 {
return nil
} else if len(matchingPolicies) == 1 {
return &matchingPolicies[0]
}
// If more than one policy matches with the same number of matching labels, the policy
// whose name comes first in lexicographic order is chosen
firstPolicyNameLexicographicOrder := matchingPolicies[0].Name
var firstPolicyNameLexicographicOrderIdx int
for idx, matchingPolicy := range matchingPolicies {
if matchingPolicy.Name < firstPolicyNameLexicographicOrder {
firstPolicyNameLexicographicOrder = matchingPolicy.Name
firstPolicyNameLexicographicOrderIdx = idx
}
}
return &matchingPolicies[firstPolicyNameLexicographicOrderIdx]
}
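// Worked example (hypothetical policy names): if "policy-b" and "policy-a"
// both match the same three labels, their scores are equal and "policy-a"
// wins by lexicographic order. If instead "policy-b" matched two VMI labels
// and one namespace label while "policy-a" matched one and two, both totals
// are three, but "policy-b" wins because greaterThan breaks total-score ties
// on the number of matching VMI labels.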
// countMatchingLabels checks whether a policy matches a VMI and counts the matching labels.
// If doesMatch is false, the returned score is meaningless and must be ignored.
func countMatchingLabels(policy *v1alpha1.MigrationPolicy, vmiLabels, namespaceLabels map[string]string) (doesMatch bool, score migrationPolicyMatchScore) {
var matchingVMILabels, matchingNSLabels int
doesMatch = true
if policy.Spec.Selectors == nil {
return false, score
}
countLabelsHelper := func(policyLabels, labelsToMatch map[string]string) (matchingLabels int) {
for policyKey, policyValue := range policyLabels {
value, exists := labelsToMatch[policyKey]
if exists && value == policyValue {
matchingLabels++
} else {
doesMatch = false
return
}
}
return matchingLabels
}
areSelectorsAndLabelsNotNil := func(selector v1alpha1.LabelSelector, labels map[string]string) bool {
return selector != nil && labels != nil
}
if areSelectorsAndLabelsNotNil(policy.Spec.Selectors.VirtualMachineInstanceSelector, vmiLabels) {
matchingVMILabels = countLabelsHelper(policy.Spec.Selectors.VirtualMachineInstanceSelector, vmiLabels)
}
if doesMatch && areSelectorsAndLabelsNotNil(policy.Spec.Selectors.NamespaceSelector, vmiLabels) {
matchingNSLabels = countLabelsHelper(policy.Spec.Selectors.NamespaceSelector, namespaceLabels)
}
if doesMatch {
score = migrationPolicyMatchScore{matchingVMILabels: matchingVMILabels, matchingNSLabels: matchingNSLabels}
}
return doesMatch, score
}
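// Illustrative sketch (hypothetical labels): a policy selecting {app: db} on
// VMIs and {env: prod} on namespaces matches a VMI labeled
// {app: db, tier: backend} in a namespace labeled {env: prod} with score
// {matchingVMILabels: 1, matchingNSLabels: 1}. If the VMI lacked app=db,
// doesMatch would be false and the returned score must be ignored.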
package node
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/controller"
"kubevirt.io/kubevirt/pkg/util/lookup"
)
const (
// NodeUnresponsiveReason is used in various places as the reason to indicate that
// an action was taken because virt-handler became unresponsive.
NodeUnresponsiveReason = "NodeUnresponsive"
)
// Controller is the main Controller struct.
type Controller struct {
clientset kubecli.KubevirtClient
Queue workqueue.TypedRateLimitingInterface[string]
nodeStore cache.Store
vmiStore cache.Store
recorder record.EventRecorder
heartBeatTimeout time.Duration
recheckInterval time.Duration
hasSynced func() bool
}
// NewController creates a new instance of the NodeController struct.
func NewController(clientset kubecli.KubevirtClient, nodeInformer cache.SharedIndexInformer, vmiInformer cache.SharedIndexInformer, recorder record.EventRecorder) (*Controller, error) {
c := &Controller{
clientset: clientset,
Queue: workqueue.NewTypedRateLimitingQueueWithConfig[string](
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-controller-node"},
),
nodeStore: nodeInformer.GetStore(),
vmiStore: vmiInformer.GetStore(),
recorder: recorder,
heartBeatTimeout: 5 * time.Minute,
recheckInterval: 1 * time.Minute,
}
c.hasSynced = func() bool {
return nodeInformer.HasSynced() && vmiInformer.HasSynced()
}
_, err := nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addNode,
DeleteFunc: c.deleteNode,
UpdateFunc: c.updateNode,
})
if err != nil {
return nil, err
}
_, err = vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addVirtualMachine,
DeleteFunc: func(_ interface{}) { /* nothing to do */ },
UpdateFunc: c.updateVirtualMachine,
})
if err != nil {
return nil, err
}
return c, nil
}
func (c *Controller) addNode(obj interface{}) {
c.enqueueNode(obj)
}
func (c *Controller) deleteNode(obj interface{}) {
c.enqueueNode(obj)
}
func (c *Controller) updateNode(_, curr interface{}) {
c.enqueueNode(curr)
}
func (c *Controller) enqueueNode(obj interface{}) {
logger := log.Log
node := obj.(*v1.Node)
key, err := controller.KeyFunc(node)
if err != nil {
logger.Object(node).Reason(err).Error("Failed to extract key from node.")
return
}
c.Queue.Add(key)
}
func (c *Controller) addVirtualMachine(obj interface{}) {
vmi := obj.(*virtv1.VirtualMachineInstance)
if vmi.Status.NodeName != "" {
c.Queue.Add(vmi.Status.NodeName)
}
}
func (c *Controller) updateVirtualMachine(_, curr interface{}) {
currVMI := curr.(*virtv1.VirtualMachineInstance)
if currVMI.Status.NodeName != "" {
c.Queue.Add(currVMI.Status.NodeName)
}
}
// Run runs the passed in NodeController.
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) {
defer controller.HandlePanic()
defer c.Queue.ShutDown()
log.Log.Info("Starting node controller.")
// Wait for cache sync before we start the node controller
cache.WaitForCacheSync(stopCh, c.hasSynced)
// Start the actual work
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
log.Log.Info("Stopping node controller.")
}
func (c *Controller) runWorker() {
for c.Execute() {
}
}
// Execute runs commands from the controller queue; if a command returns an
// error, it is requeued. Returns false when the queue
// shuts down.
func (c *Controller) Execute() bool {
key, quit := c.Queue.Get()
if quit {
return false
}
defer c.Queue.Done(key)
err := c.execute(key)
if err != nil {
log.Log.Reason(err).Infof("reenqueuing node %v", key)
c.Queue.AddRateLimited(key)
} else {
log.Log.V(4).Infof("processed node %v", key)
c.Queue.Forget(key)
}
return true
}
func (c *Controller) execute(key string) error {
logger := log.DefaultLogger()
obj, nodeExists, err := c.nodeStore.GetByKey(key)
if err != nil {
return err
}
var node *v1.Node
if nodeExists {
node = obj.(*v1.Node)
logger = logger.Object(node)
} else {
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err == nil {
params := []string{}
if namespace != "" {
params = append(params, "namespace", namespace)
}
params = append(params, "name", name)
params = append(params, "kind", "Node")
logger = logger.With(params)
}
}
unresponsive, err := isNodeUnresponsive(node, c.heartBeatTimeout)
if err != nil {
logger.Reason(err).Error("Failed to determine if node is responsive, will not reenqueue")
return nil
}
if unresponsive {
if nodeIsSchedulable(node) {
if err := c.markNodeAsUnresponsive(node, logger); err != nil {
return err
}
}
err = c.checkNodeForOrphanedAndErroredVMIs(key, node, logger)
if err != nil {
return err
}
}
c.requeueIfExists(key, node)
return nil
}
func nodeIsSchedulable(node *v1.Node) bool {
if node == nil {
return false
}
return node.Labels[virtv1.NodeSchedulable] == "true"
}
func (c *Controller) checkNodeForOrphanedAndErroredVMIs(nodeName string, node *v1.Node, logger *log.FilteredLogger) error {
vmis, err := lookup.ActiveVirtualMachinesOnNode(c.clientset, nodeName)
if err != nil {
logger.Reason(err).Error("Failed fetching vmis for node")
return err
}
if len(vmis) == 0 {
c.requeueIfExists(nodeName, node)
return nil
}
err = c.createEventIfNodeHasOrphanedVMIs(node, vmis)
if err != nil {
logger.Reason(err).Error("checking virt-handler for node")
return err
}
return c.checkVirtLauncherPodsAndUpdateVMIStatus(nodeName, vmis, logger)
}
func (c *Controller) checkVirtLauncherPodsAndUpdateVMIStatus(nodeName string, vmis []*virtv1.VirtualMachineInstance, logger *log.FilteredLogger) error {
pods, err := c.alivePodsOnNode(nodeName)
if err != nil {
logger.Reason(err).Error("Failed fetch pods for node")
return err
}
vmis = filterStuckVirtualMachinesWithoutPods(vmis, pods)
return c.updateVMIWithFailedStatus(vmis, logger)
}
func (c *Controller) updateVMIWithFailedStatus(vmis []*virtv1.VirtualMachineInstance, logger *log.FilteredLogger) error {
errs := []string{}
// Do sequential updates, we don't want to create update storms in situations where something might already be wrong
for _, vmi := range vmis {
err := c.createAndApplyFailedVMINodeUnresponsivePatch(vmi, logger)
if err != nil {
errs = append(errs, fmt.Sprintf("failed to move vmi %s in namespace %s to final state: %v", vmi.Name, vmi.Namespace, err))
}
}
if len(errs) > 0 {
return fmt.Errorf("%v", strings.Join(errs, "; "))
}
return nil
}
func (c *Controller) createAndApplyFailedVMINodeUnresponsivePatch(vmi *virtv1.VirtualMachineInstance, logger *log.FilteredLogger) error {
c.recorder.Event(vmi, v1.EventTypeNormal, NodeUnresponsiveReason, fmt.Sprintf("virt-handler on node %s is not responsive, marking VMI as failed", vmi.Status.NodeName))
logger.V(2).Infof("Moving vmi %s in namespace %s on unresponsive node to failed state", vmi.Name, vmi.Namespace)
patchBytes, err := patch.New(patch.WithReplace("/status/phase", virtv1.Failed),
patch.WithAdd("/status/reason", NodeUnresponsiveReason)).GeneratePayload()
if err != nil {
return err
}
_, err = c.clientset.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
logger.Reason(err).Errorf("Failed to move vmi %s in namespace %s to final state", vmi.Name, vmi.Namespace)
return err
}
return nil
}
func (c *Controller) requeueIfExists(key string, node *v1.Node) {
if node == nil {
return
}
c.Queue.AddAfter(key, c.recheckInterval)
}
func (c *Controller) markNodeAsUnresponsive(node *v1.Node, logger *log.FilteredLogger) error {
c.recorder.Event(node, v1.EventTypeNormal, NodeUnresponsiveReason, "virt-handler is not responsive, marking node as unresponsive")
logger.V(4).Infof("Marking node %s as unresponsive", node.Name)
data := []byte(fmt.Sprintf(`{"metadata": { "labels": {"%s": "false"}}}`, virtv1.NodeSchedulable))
_, err := c.clientset.CoreV1().Nodes().Patch(context.Background(), node.Name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
if err != nil {
logger.Reason(err).Error("Failed to mark node as unschedulable")
return fmt.Errorf("failed to mark node %s as unschedulable: %v", node.Name, err)
}
return nil
}
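// For reference, the strategic merge patch sent above expands to
//
//	{"metadata": {"labels": {"kubevirt.io/schedulable": "false"}}}
//
// assuming virtv1.NodeSchedulable is the upstream "kubevirt.io/schedulable"
// label key.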
func (c *Controller) createEventIfNodeHasOrphanedVMIs(node *v1.Node, vmis []*virtv1.VirtualMachineInstance) error {
// node is not running any vmis so we don't need to check anything else
if len(vmis) == 0 || node == nil {
return nil
}
// query for a virt-handler pod on the node
handlerNodeSelector := fields.ParseSelectorOrDie("spec.nodeName=" + node.GetName())
virtHandlerSelector := fields.ParseSelectorOrDie("kubevirt.io=virt-handler")
pods, err := c.clientset.CoreV1().Pods(v1.NamespaceAll).List(context.Background(), metav1.ListOptions{
FieldSelector: handlerNodeSelector.String(),
LabelSelector: virtHandlerSelector.String(),
})
if err != nil {
return err
}
// node is running the virt-handler
if len(pods.Items) != 0 {
return nil
}
running, err := checkDaemonSetStatus(c.clientset, virtHandlerSelector)
if err != nil {
return err
}
// the virt-handler DaemonSet is not running as expected so we can't know for sure
// whether a virt-handler pod will be run on this node
if !running {
c.requeueIfExists(node.GetName(), node)
return nil
}
c.recorder.Event(node, v1.EventTypeWarning, NodeUnresponsiveReason, "virt-handler is not present, there are orphaned vmis on this node. Run virt-handler on this node to migrate or remove them.")
return nil
}
func checkDaemonSetStatus(clientset kubecli.KubevirtClient, selector fields.Selector) (bool, error) {
dss, err := clientset.AppsV1().DaemonSets(v1.NamespaceAll).List(context.Background(), metav1.ListOptions{
LabelSelector: selector.String(),
})
if err != nil {
return false, err
}
if len(dss.Items) != 1 {
return false, fmt.Errorf("shouuld only be running one virt-handler DaemonSet")
}
ds := dss.Items[0]
desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
if desired != scheduled && desired != ready {
return false, nil
}
return true, nil
}
func (c *Controller) alivePodsOnNode(nodeName string) ([]*v1.Pod, error) {
handlerNodeSelector := fields.ParseSelectorOrDie("spec.nodeName=" + nodeName)
list, err := c.clientset.CoreV1().Pods(v1.NamespaceAll).List(context.Background(), metav1.ListOptions{
FieldSelector: handlerNodeSelector.String(),
})
if err != nil {
return nil, err
}
pods := []*v1.Pod{}
for i := range list.Items {
pod := &list.Items[i]
if controllerRef := metav1.GetControllerOf(pod); !isControlledByVMI(controllerRef) {
continue
}
// Some pods get stuck in a pending Termination during shutdown
// due to virt-handler not being available to unmount container disk
// mount propagation. A pod with all containers terminated is not
// considered alive
allContainersTerminated := false
if len(pod.Status.ContainerStatuses) > 0 {
allContainersTerminated = true
for _, status := range pod.Status.ContainerStatuses {
if status.State.Terminated == nil {
allContainersTerminated = false
break
}
}
}
phase := pod.Status.Phase
if !allContainersTerminated && phase != v1.PodFailed && phase != v1.PodSucceeded {
pods = append(pods, pod)
}
}
return pods, nil
}
func filterStuckVirtualMachinesWithoutPods(vmis []*virtv1.VirtualMachineInstance, pods []*v1.Pod) []*virtv1.VirtualMachineInstance {
podsPerNamespace := map[string]map[string]*v1.Pod{}
for _, pod := range pods {
podsForVMI, ok := podsPerNamespace[pod.Namespace]
if !ok {
podsForVMI = map[string]*v1.Pod{}
}
if controllerRef := metav1.GetControllerOf(pod); isControlledByVMI(controllerRef) {
podsForVMI[string(controllerRef.UID)] = pod
podsPerNamespace[pod.Namespace] = podsForVMI
}
}
filtered := []*virtv1.VirtualMachineInstance{}
for _, vmi := range vmis {
if podsForVMI, exists := podsPerNamespace[vmi.Namespace]; exists {
if _, exists := podsForVMI[string(vmi.UID)]; exists {
continue
}
}
filtered = append(filtered, vmi)
}
return filtered
}
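// Illustrative sketch (hypothetical UIDs): a VMI with UID "abc" in namespace
// "ns1" is filtered out if some alive pod in "ns1" has a controller
// OwnerReference of kind VirtualMachineInstance with UID "abc"; a VMI with
// no such pod is returned as stuck and will later be moved to a failed state.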
func isControlledByVMI(controllerRef *metav1.OwnerReference) bool {
return controllerRef != nil && controllerRef.Kind == virtv1.VirtualMachineInstanceGroupVersionKind.Kind
}
func isNodeUnresponsive(node *v1.Node, timeout time.Duration) (bool, error) {
if node == nil {
return true, nil
}
if lastHeartBeat, exists := node.Annotations[virtv1.VirtHandlerHeartbeat]; exists {
timestamp := metav1.Time{}
if err := json.Unmarshal([]byte(`"`+lastHeartBeat+`"`), &timestamp); err != nil {
return false, err
}
if timestamp.Time.Before(metav1.Now().Add(-timeout)) {
return true, nil
}
}
return false, nil
}
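// Worked example: with the default heartBeatTimeout of 5 minutes, a node
// whose heartbeat annotation reads "2006-01-02T15:04:05Z" counts as
// unresponsive once the current time passes 15:09:05Z. A node missing the
// annotation is treated as responsive, while a nil (deleted) node is always
// unresponsive. The annotation key is virtv1.VirtHandlerHeartbeat.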
// Copyright 2025 the cncf-fuzzing authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package node
import (
"time"
)
// SetRecheckInternal overrides the controller's recheck interval (test helper).
func SetRecheckInternal(ctrl *Controller, interval time.Duration) {
ctrl.recheckInterval = interval
}
package pool
import (
"context"
"encoding/json"
"fmt"
"maps"
"math"
"math/rand"
"slices"
"sort"
"strconv"
"strings"
"sync"
"time"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/trace"
appsv1 "k8s.io/api/apps/v1"
k8score "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
"kubevirt.io/kubevirt/pkg/pointer"
virtv1 "kubevirt.io/api/core/v1"
poolv1 "kubevirt.io/api/pool/v1beta1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/controller"
traceUtils "kubevirt.io/kubevirt/pkg/util/trace"
"kubevirt.io/kubevirt/pkg/virt-controller/watch/common"
)
// Controller is the main Controller struct.
type Controller struct {
clientset kubecli.KubevirtClient
queue workqueue.TypedRateLimitingInterface[string]
vmIndexer cache.Indexer
vmiStore cache.Store
pvcStore cache.Store
dvStore cache.Store
poolIndexer cache.Indexer
revisionIndexer cache.Indexer
recorder record.EventRecorder
expectations *controller.UIDTrackingControllerExpectations
burstReplicas uint
hasSynced func() bool
}
const (
FailedUpdateVirtualMachineReason = "FailedUpdate"
SuccessfulUpdateVirtualMachineReason = "SuccessfulUpdate"
defaultAddDelay = 1 * time.Second
defaultRetryDelay = 3 * time.Second
defaultStartUpFailureThreshold = 3
minFailingToStartDuration = 5 * time.Minute
)
const (
FailedScaleOutReason = "FailedScaleOut"
FailedScaleInReason = "FailedScaleIn"
FailedUpdateReason = "FailedUpdate"
FailedRevisionPruningReason = "FailedRevisionPruning"
SuccessfulPausedPoolReason = "SuccessfulPaused"
SuccessfulResumePoolReason = "SuccessfulResume"
)
var virtControllerPoolWorkQueueTracer = &traceUtils.Tracer{Threshold: time.Second}
// NewController creates a new instance of the PoolController struct.
func NewController(clientset kubecli.KubevirtClient,
vmiInformer cache.SharedIndexInformer,
vmInformer cache.SharedIndexInformer,
poolInformer cache.SharedIndexInformer,
pvcInformer cache.SharedIndexInformer,
dvInformer cache.SharedIndexInformer,
revisionInformer cache.SharedIndexInformer,
recorder record.EventRecorder,
burstReplicas uint) (*Controller, error) {
c := &Controller{
clientset: clientset,
queue: workqueue.NewTypedRateLimitingQueueWithConfig[string](
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-controller-pool"},
),
poolIndexer: poolInformer.GetIndexer(),
vmiStore: vmiInformer.GetStore(),
vmIndexer: vmInformer.GetIndexer(),
pvcStore: pvcInformer.GetStore(),
dvStore: dvInformer.GetStore(),
revisionIndexer: revisionInformer.GetIndexer(),
recorder: recorder,
expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
burstReplicas: burstReplicas,
}
c.hasSynced = func() bool {
return poolInformer.HasSynced() && vmInformer.HasSynced() && vmiInformer.HasSynced() && revisionInformer.HasSynced() && pvcInformer.HasSynced() && dvInformer.HasSynced()
}
_, err := poolInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addPool,
DeleteFunc: c.deletePool,
UpdateFunc: c.updatePool,
})
if err != nil {
return nil, err
}
_, err = vmInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addVMHandler,
DeleteFunc: c.deleteVMHandler,
UpdateFunc: c.updateVMHandler,
})
if err != nil {
return nil, err
}
_, err = revisionInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addRevisionHandler,
UpdateFunc: c.updateRevisionHandler,
})
if err != nil {
return nil, err
}
_, err = vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addVMIHandler,
UpdateFunc: c.updateVMIHandler,
})
if err != nil {
return nil, err
}
return c, nil
}
func (c *Controller) resolveVMIControllerRef(namespace string, controllerRef *metav1.OwnerReference) *virtv1.VirtualMachine {
// We can't look up by UID, so look up by Name and then verify UID.
// Don't even try to look up by Name if it's the wrong Kind.
if controllerRef.Kind != virtv1.VirtualMachineGroupVersionKind.Kind {
return nil
}
vm, exists, err := c.vmIndexer.GetByKey(controller.NamespacedKey(namespace, controllerRef.Name))
if err != nil {
return nil
}
if !exists {
return nil
}
if vm.(*virtv1.VirtualMachine).UID != controllerRef.UID {
// The controller we found with this Name is not the same one that the
// ControllerRef points to.
return nil
}
return vm.(*virtv1.VirtualMachine)
}
func (c *Controller) addVMIHandler(obj interface{}) {
vmi := obj.(*virtv1.VirtualMachineInstance)
if vmi.DeletionTimestamp != nil {
return
}
vmiControllerRef := metav1.GetControllerOf(vmi)
if vmiControllerRef == nil {
return
}
log.Log.Object(vmi).V(4).Info("Looking for VirtualMachineInstance Ref")
vm := c.resolveVMIControllerRef(vmi.Namespace, vmiControllerRef)
if vm == nil {
// VMI is not controlled by a VM
return
}
vmControllerRef := metav1.GetControllerOf(vm)
if vmControllerRef == nil {
return
}
pool := c.resolveControllerRef(vm.Namespace, vmControllerRef)
if pool == nil {
// VM is not controlled by a pool
return
}
vmRevisionName, vmOk := vm.Spec.Template.ObjectMeta.Labels[virtv1.VirtualMachinePoolRevisionName]
vmiRevisionName, vmiOk := vmi.Labels[virtv1.VirtualMachinePoolRevisionName]
if vmOk && vmiOk && vmRevisionName == vmiRevisionName {
// nothing to do here, VMI is up-to-date with VM's Template
return
}
// enqueue the Pool due to a VMI detected that isn't up to date
c.enqueuePool(pool)
}
func (c *Controller) updateVMIHandler(old, cur interface{}) {
c.addVMIHandler(cur)
}
// When a revision is created, enqueue the pool that manages it and update its expectations.
func (c *Controller) addRevisionHandler(obj interface{}) {
cr := obj.(*appsv1.ControllerRevision)
// If it has a ControllerRef, that's all that matters.
if controllerRef := metav1.GetControllerOf(cr); controllerRef != nil {
pool := c.resolveControllerRef(cr.Namespace, controllerRef)
if pool == nil {
return
}
poolKey, err := controller.KeyFunc(pool)
if err != nil {
return
}
c.expectations.CreationObserved(poolKey)
c.enqueuePool(pool)
return
}
}
func (c *Controller) updateRevisionHandler(old, cur interface{}) {
cr := cur.(*appsv1.ControllerRevision)
// If it has a ControllerRef, that's all that matters.
if controllerRef := metav1.GetControllerOf(cr); controllerRef != nil {
pool := c.resolveControllerRef(cr.Namespace, controllerRef)
if pool == nil {
return
}
c.enqueuePool(pool)
return
}
}
// When a vm is created, enqueue the pool that manages it and update its expectations.
func (c *Controller) addVMHandler(obj interface{}) {
vm := obj.(*virtv1.VirtualMachine)
if vm.DeletionTimestamp != nil {
// on a restart of the controller manager, it's possible a new vm shows up in a state that
// is already pending deletion. Prevent the vm from being a creation observation.
c.deleteVMHandler(vm)
return
}
// If it has a ControllerRef, that's all that matters.
if controllerRef := metav1.GetControllerOf(vm); controllerRef != nil {
pool := c.resolveControllerRef(vm.Namespace, controllerRef)
if pool == nil {
return
}
poolKey, err := controller.KeyFunc(pool)
if err != nil {
return
}
log.Log.V(4).Object(vm).Infof("VirtualMachine created")
c.expectations.CreationObserved(poolKey)
c.enqueuePool(pool)
return
}
}
// When a vm is updated, figure out what pool/s manage it and wake them
// up. If the labels of the vm have changed we need to awaken both the old
// and new pool. old and cur must be *virtv1.VirtualMachine types.
func (c *Controller) updateVMHandler(old, cur interface{}) {
curVM := cur.(*virtv1.VirtualMachine)
oldVM := old.(*virtv1.VirtualMachine)
if curVM.ResourceVersion == oldVM.ResourceVersion {
return
}
labelChanged := !equality.Semantic.DeepEqual(curVM.Labels, oldVM.Labels)
if curVM.DeletionTimestamp != nil {
c.deleteVMHandler(curVM)
if labelChanged {
c.deleteVMHandler(oldVM)
}
return
}
curControllerRef := metav1.GetControllerOf(curVM)
oldControllerRef := metav1.GetControllerOf(oldVM)
controllerRefChanged := !equality.Semantic.DeepEqual(curControllerRef, oldControllerRef)
if controllerRefChanged && oldControllerRef != nil {
// The ControllerRef was changed. Sync the old controller, if any.
if pool := c.resolveControllerRef(oldVM.Namespace, oldControllerRef); pool != nil {
c.enqueuePool(pool)
}
}
// If it has a ControllerRef, that's all that matters.
if curControllerRef != nil {
pool := c.resolveControllerRef(curVM.Namespace, curControllerRef)
if pool == nil {
return
}
log.Log.V(4).Object(curVM).Infof("VirtualMachine updated")
c.enqueuePool(pool)
return
}
}
// When a vm is deleted, enqueue the pool that manages the vm and update its expectations.
// obj could be a *virtv1.VirtualMachine, or a DeletionFinalStateUnknown marker item.
func (c *Controller) deleteVMHandler(obj interface{}) {
vm, ok := obj.(*virtv1.VirtualMachine)
// When a delete is dropped, the relist will notice a vm in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the vm
// changed labels the new Pool will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Log.Reason(fmt.Errorf("couldn't get object from tombstone %+v", obj)).Error("Failed to process delete notification")
return
}
vm, ok = tombstone.Obj.(*virtv1.VirtualMachine)
if !ok {
log.Log.Reason(fmt.Errorf("tombstone contained object that is not a vm %#v", obj)).Error("Failed to process delete notification")
return
}
}
controllerRef := metav1.GetControllerOf(vm)
if controllerRef == nil {
return
}
pool := c.resolveControllerRef(vm.Namespace, controllerRef)
if pool == nil {
return
}
poolKey, err := controller.KeyFunc(pool)
if err != nil {
return
}
c.expectations.DeletionObserved(poolKey, controller.VirtualMachineKey(vm))
c.enqueuePool(pool)
}
func (c *Controller) addPool(obj interface{}) {
c.enqueuePool(obj)
}
func (c *Controller) deletePool(obj interface{}) {
c.enqueuePool(obj)
}
func (c *Controller) updatePool(_, curr interface{}) {
c.enqueuePool(curr)
}
func (c *Controller) enqueuePool(obj interface{}) {
logger := log.Log
pool := obj.(*poolv1.VirtualMachinePool)
key, err := controller.KeyFunc(pool)
if err != nil {
logger.Object(pool).Reason(err).Error("Failed to extract key from pool.")
return
}
// Delay prevents pool from being reconciled too often
c.queue.AddAfter(key, defaultAddDelay)
}
// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (c *Controller) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *poolv1.VirtualMachinePool {
// We can't look up by UID, so look up by Name and then verify UID.
// Don't even try to look up by Name if it's the wrong Kind.
if controllerRef.Kind != poolv1.VirtualMachinePoolKind {
return nil
}
pool, exists, err := c.poolIndexer.GetByKey(controller.NamespacedKey(namespace, controllerRef.Name))
if err != nil {
return nil
}
if !exists {
return nil
}
if pool.(*poolv1.VirtualMachinePool).UID != controllerRef.UID {
// The controller we found with this Name is not the same one that the
// ControllerRef points to.
return nil
}
return pool.(*poolv1.VirtualMachinePool)
}
// listControllerFromNamespace takes a namespace and returns all Pools from the Pool cache which run in this namespace
func (c *Controller) listControllerFromNamespace(namespace string) ([]*poolv1.VirtualMachinePool, error) {
objs, err := c.poolIndexer.ByIndex(cache.NamespaceIndex, namespace)
if err != nil {
return nil, err
}
pools := []*poolv1.VirtualMachinePool{}
for _, obj := range objs {
pool := obj.(*poolv1.VirtualMachinePool)
pools = append(pools, pool)
}
return pools, nil
}
// getMatchingControllers returns all Pools from the listener cache which match the labels of the VirtualMachine.
// An empty result is returned if no pool matches.
func (c *Controller) getMatchingControllers(vm *virtv1.VirtualMachine) (pools []*poolv1.VirtualMachinePool) {
logger := log.Log
controllers, err := c.listControllerFromNamespace(vm.ObjectMeta.Namespace)
if err != nil {
return nil
}
for _, pool := range controllers {
selector, err := metav1.LabelSelectorAsSelector(pool.Spec.Selector)
if err != nil {
logger.Object(pool).Reason(err).Error("Failed to parse label selector from pool.")
continue
}
if selector.Matches(labels.Set(vm.ObjectMeta.Labels)) {
pools = append(pools, pool)
}
}
return pools
}
// Run runs the passed in PoolController.
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) {
defer controller.HandlePanic()
defer c.queue.ShutDown()
log.Log.Info("Starting pool controller.")
// Wait for cache sync before we start the pool controller
cache.WaitForCacheSync(stopCh, c.hasSynced)
// Start the actual work
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
log.Log.Info("Stopping pool controller.")
}
func (c *Controller) runWorker() {
for c.Execute() {
}
}
func (c *Controller) listVMsFromNamespace(namespace string) ([]*virtv1.VirtualMachine, error) {
objs, err := c.vmIndexer.ByIndex(cache.NamespaceIndex, namespace)
if err != nil {
return nil, err
}
vms := []*virtv1.VirtualMachine{}
for _, obj := range objs {
vms = append(vms, obj.(*virtv1.VirtualMachine))
}
return vms, nil
}
func (c *Controller) calcDiff(pool *poolv1.VirtualMachinePool, vms []*virtv1.VirtualMachine) int {
wantedReplicas := int32(1)
if pool.Spec.Replicas != nil {
wantedReplicas = *pool.Spec.Replicas
}
return len(vms) - int(wantedReplicas)
}
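// Illustrative sketch for calcDiff (hypothetical values, not part of the
// controller flow):
//
//	pool.Spec.Replicas = pointer.P(int32(3))
//	// with 5 VMs in the cache: calcDiff(pool, vms) == 2  -> scale in two VMs
//	// with 2 VMs in the cache: calcDiff(pool, vms) == -1 -> scale out one VM
//
// A nil Replicas field defaults to 1, mirroring the Kubernetes convention for
// replica counts.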
func filterRunningVMs(vms []*virtv1.VirtualMachine) []*virtv1.VirtualMachine {
filtered := []*virtv1.VirtualMachine{}
for _, vm := range vms {
if vm.DeletionTimestamp == nil {
filtered = append(filtered, vm)
}
}
return filtered
}
func filterDeletingVMs(vms []*virtv1.VirtualMachine) []*virtv1.VirtualMachine {
filtered := []*virtv1.VirtualMachine{}
for _, vm := range vms {
if vm.DeletionTimestamp != nil {
filtered = append(filtered, vm)
}
}
return filtered
}
// filterReadyVMs takes a list of VMs and returns all VMs which are in ready state.
func (c *Controller) filterReadyVMs(vms []*virtv1.VirtualMachine) []*virtv1.VirtualMachine {
return filterVMs(vms, func(vm *virtv1.VirtualMachine) bool {
return controller.NewVirtualMachineConditionManager().HasConditionWithStatus(vm, virtv1.VirtualMachineConditionType(k8score.PodReady), k8score.ConditionTrue)
})
}
func (c *Controller) filterNotReadyVMs(vms []*virtv1.VirtualMachine) []*virtv1.VirtualMachine {
return filterVMs(vms, func(vm *virtv1.VirtualMachine) bool {
return !controller.NewVirtualMachineConditionManager().HasConditionWithStatus(vm, virtv1.VirtualMachineConditionType(k8score.PodReady), k8score.ConditionTrue)
})
}
func filterVMs(vms []*virtv1.VirtualMachine, f func(vmi *virtv1.VirtualMachine) bool) []*virtv1.VirtualMachine {
filtered := []*virtv1.VirtualMachine{}
for _, vm := range vms {
if f(vm) {
filtered = append(filtered, vm)
}
}
return filtered
}
func resolveScaleInPolicy(scaleInStrategy *poolv1.VirtualMachinePoolScaleInStrategy) poolv1.VirtualMachinePoolSortPolicy {
if scaleInStrategy == nil || scaleInStrategy.Proactive == nil {
return poolv1.VirtualMachinePoolSortPolicyRandom
}
return resolveSelectionPolicy(scaleInStrategy.Proactive.SelectionPolicy)
}
func resolveSortPolicyForUpdate(updateStrategy *poolv1.VirtualMachinePoolProactiveUpdateStrategy) poolv1.VirtualMachinePoolSortPolicy {
if updateStrategy == nil {
return poolv1.VirtualMachinePoolSortPolicyRandom
}
return resolveSelectionPolicy(updateStrategy.SelectionPolicy)
}
func resolveSelectionPolicy(selectionPolicy *poolv1.VirtualMachinePoolSelectionPolicy) poolv1.VirtualMachinePoolSortPolicy {
if selectionPolicy == nil ||
selectionPolicy.SortPolicy == nil {
return poolv1.VirtualMachinePoolSortPolicyRandom
}
return *selectionPolicy.SortPolicy
}
func sortVMsBasedOnSortPolicy(vms []*virtv1.VirtualMachine, sortPolicy poolv1.VirtualMachinePoolSortPolicy) {
switch sortPolicy {
case poolv1.VirtualMachinePoolSortPolicyAscendingOrder:
sortVMsByOrdinal(vms, true)
case poolv1.VirtualMachinePoolSortPolicyDescendingOrder:
sortVMsByOrdinal(vms, false)
case poolv1.VirtualMachinePoolSortPolicyNewest:
sortVMsByNewestFirst(vms)
case poolv1.VirtualMachinePoolSortPolicyOldest:
sortVMsByOldestFirst(vms)
case poolv1.VirtualMachinePoolSortPolicyRandom:
sortVMsRandom(vms)
default:
log.Log.Warningf("Sorting VMs based on random policy as the provided sort policy is invalid: %s", sortPolicy)
sortVMsRandom(vms)
}
}
func sortVMsByNewestFirst(vms []*virtv1.VirtualMachine) {
sort.Slice(vms, func(i, j int) bool {
return vms[i].CreationTimestamp.Time.After(vms[j].CreationTimestamp.Time)
})
}
func sortVMsByOldestFirst(vms []*virtv1.VirtualMachine) {
sort.Slice(vms, func(i, j int) bool {
return vms[i].CreationTimestamp.Before(&vms[j].CreationTimestamp)
})
}
func sortVMsByOrdinal(vms []*virtv1.VirtualMachine, ascending bool) {
sort.Slice(vms, func(i, j int) bool {
ordinalI, errI := indexFromName(vms[i].Name)
ordinalJ, errJ := indexFromName(vms[j].Name)
if errI != nil {
ordinalI = 0
}
if errJ != nil {
ordinalJ = 0
}
if ascending {
return ordinalI < ordinalJ
}
return ordinalI > ordinalJ
})
}
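// Note on sortVMsByOrdinal: names that fail to parse to an ordinal are treated
// as ordinal 0, so malformed names sort first in ascending order and last in
// descending order instead of aborting the sort.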
func sortVMsRandom(vms []*virtv1.VirtualMachine) {
rand.Shuffle(len(vms), func(i, j int) {
vms[i], vms[j] = vms[j], vms[i]
})
}
func filterVMsBasedOnSelectors(vms []*virtv1.VirtualMachine, selectors *poolv1.VirtualMachinePoolSelectors) ([]*virtv1.VirtualMachine, error) {
var labelSelector labels.Selector
var nodeSelector labels.Selector
var err error
if selectors.LabelSelector != nil {
labelSelector, err = metav1.LabelSelectorAsSelector(selectors.LabelSelector)
if err != nil {
return nil, fmt.Errorf("failed to parse label selector from pool: %v", err)
}
}
if selectors.NodeSelectorRequirementMatcher != nil {
nodeSelector, err = nodeSelectorRequirementsAsSelector(selectors.NodeSelectorRequirementMatcher)
if err != nil {
return nil, fmt.Errorf("failed to parse node selector from pool: %v", err)
}
}
if labelSelector == nil && nodeSelector == nil {
return vms, nil
}
var filteredVms []*virtv1.VirtualMachine
for _, vm := range vms {
if labelSelector != nil && !labelSelector.Matches(labels.Set(vm.Spec.Template.ObjectMeta.Labels)) {
continue
}
if nodeSelector != nil && !nodeSelector.Matches(labels.Set(vm.Spec.Template.Spec.NodeSelector)) {
continue
}
filteredVms = append(filteredVms, vm)
}
return filteredVms, nil
}
func (c *Controller) proactiveScaleIn(pool *poolv1.VirtualMachinePool, vms []*virtv1.VirtualMachine, count int) error {
if isUnmanaged(pool) || isOpportunisticScaleInEnabled(pool) {
return nil
}
eligibleVMs := filterRunningVMs(vms)
// Account for VMs that are already being deleted so we do not delete more than requested.
count = count - (len(vms) - len(eligibleVMs))
return c.scaleIn(pool, eligibleVMs, count)
}
func (c *Controller) scaleIn(pool *poolv1.VirtualMachinePool, vms []*virtv1.VirtualMachine, count int) error {
if len(vms) == 0 || count == 0 {
return nil
} else if count > len(vms) {
count = len(vms)
}
poolKey, err := controller.KeyFunc(pool)
if err != nil {
return err
}
eligibleVMs := filterRunningVMs(vms)
// Account for VMs that are already being deleted so we do not delete more than requested.
count = count - (len(vms) - len(eligibleVMs))
if len(eligibleVMs) == 0 || count == 0 {
return nil
} else if count > len(eligibleVMs) {
count = len(eligibleVMs)
}
sortPolicy := resolveScaleInPolicy(pool.Spec.ScaleInStrategy)
sortVMsBasedOnSortPolicy(eligibleVMs, sortPolicy)
if hasSelectorsSelectionPolicyForScaleIn(pool) {
eligibleVMs, err = filterVMsBasedOnSelectors(eligibleVMs, pool.Spec.ScaleInStrategy.Proactive.SelectionPolicy.Selectors)
if err != nil {
return err
}
}
log.Log.Object(pool).Infof("Removing %d VMs from pool", count)
var wg sync.WaitGroup
deleteList := eligibleVMs[0:count]
c.expectations.ExpectDeletions(poolKey, controller.VirtualMachineKeys(deleteList))
wg.Add(len(deleteList))
errChan := make(chan error, len(deleteList))
for i := range deleteList {
go func(idx int) {
defer wg.Done()
vm := deleteList[idx]
if err := c.clientset.VirtualMachine(vm.Namespace).Delete(context.Background(), vm.Name, metav1.DeleteOptions{}); err != nil {
c.expectations.DeletionObserved(poolKey, controller.VirtualMachineKey(vm))
c.recorder.Eventf(pool, k8score.EventTypeWarning, common.FailedDeleteVirtualMachineReason, "Error deleting virtual machine %s/%s: %v", vm.Namespace, vm.Name, err)
errChan <- err
return
}
if err := c.statePreservationCleanupforVM(pool, vm, isStatePreservationEnabled(resolveProactiveScaleInStatePreservation(pool))); err != nil {
c.recorder.Eventf(pool, k8score.EventTypeWarning, FailedUpdateVirtualMachineReason, "Error preserving state of VM %s/%s: %v", vm.Namespace, vm.Name, err)
errChan <- err
return
}
c.recorder.Eventf(pool, k8score.EventTypeNormal, common.SuccessfulDeleteVirtualMachineReason, "Deleted VM %s/%s with uid %v from pool", vm.Namespace, vm.Name, vm.ObjectMeta.UID)
log.Log.Object(pool).Infof("Deleted vm %s/%s from pool", vm.Namespace, vm.Name)
}(i)
}
wg.Wait()
select {
case err := <-errChan:
// Only return the first error which occurred. We log the rest
return err
default:
}
return nil
}
func (c *Controller) opportunisticScaleIn(pool *poolv1.VirtualMachinePool, vms []*virtv1.VirtualMachine, preserveState bool) error {
if isUnmanaged(pool) {
return nil
}
deletingVMs := filterDeletingVMs(vms)
if len(deletingVMs) == 0 {
return nil
}
var lastErr error
for _, vm := range deletingVMs {
if err := c.statePreservationCleanupforVM(pool, vm, preserveState); err != nil {
lastErr = err
}
log.Log.Object(vm).Infof("Removing VM %s/%s from pool", vm.Namespace, vm.Name)
}
return lastErr
}
func generateVMName(index int, baseName string) string {
return fmt.Sprintf("%s-%d", baseName, index)
}
func calculateNewVMNames(count int, baseName string, namespace string, vmStore cache.Store) []string {
var newNames []string
// generate `count` new unused VM names
curIndex := 0
for n := 0; n < count; n++ {
// find next unused index starting where we left off last
i := curIndex
for {
name := generateVMName(i, baseName)
vmKey := controller.NamespacedKey(namespace, name)
_, exists, _ := vmStore.GetByKey(vmKey)
if !exists {
newNames = append(newNames, name)
curIndex = i + 1
break
}
i++
}
}
return newNames
}
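// Usage sketch for calculateNewVMNames (hypothetical store contents): for a
// pool named "web" whose store already holds "web-0" and "web-2",
//
//	calculateNewVMNames(2, "web", ns, store) // returns ["web-1", "web-3"]
//
// i.e. the search walks ordinals upward and reuses the gap left by a deleted
// VM before allocating a fresh index.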
func poolOwnerRef(pool *poolv1.VirtualMachinePool) metav1.OwnerReference {
t := pointer.P(true)
gvk := schema.GroupVersionKind{Group: poolv1.SchemeGroupVersion.Group, Version: poolv1.SchemeGroupVersion.Version, Kind: poolv1.VirtualMachinePoolKind}
return metav1.OwnerReference{
APIVersion: gvk.GroupVersion().String(),
Kind: gvk.Kind,
Name: pool.ObjectMeta.Name,
UID: pool.ObjectMeta.UID,
Controller: t,
BlockOwnerDeletion: t,
}
}
func indexFromName(name string) (int, error) {
slice := strings.Split(name, "-")
return strconv.Atoi(slice[len(slice)-1])
}
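// Pool VM names have the shape "<basename>-<ordinal>" and the base name may
// itself contain dashes, so indexFromName splits on the last "-":
// indexFromName("web-pool-12") yields 12, while a name without a trailing
// number returns a strconv error (callers above fall back to ordinal 0).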
func indexVMSpec(poolSpec *poolv1.VirtualMachinePoolSpec, idx int) *virtv1.VirtualMachineSpec {
spec := poolSpec.VirtualMachineTemplate.Spec.DeepCopy()
dvNameMap := map[string]string{}
for i := range spec.DataVolumeTemplates {
indexName := fmt.Sprintf("%s-%d", spec.DataVolumeTemplates[i].Name, idx)
dvNameMap[spec.DataVolumeTemplates[i].Name] = indexName
spec.DataVolumeTemplates[i].Name = indexName
}
appendIndexToConfigMapRefs := false
appendIndexToSecretRefs := false
if poolSpec.NameGeneration != nil {
if poolSpec.NameGeneration.AppendIndexToConfigMapRefs != nil {
appendIndexToConfigMapRefs = *poolSpec.NameGeneration.AppendIndexToConfigMapRefs
}
if poolSpec.NameGeneration.AppendIndexToSecretRefs != nil {
appendIndexToSecretRefs = *poolSpec.NameGeneration.AppendIndexToSecretRefs
}
}
suffix := "-" + strconv.Itoa(idx)
for i, volume := range spec.Template.Spec.Volumes {
if volume.VolumeSource.PersistentVolumeClaim != nil {
indexName, ok := dvNameMap[volume.VolumeSource.PersistentVolumeClaim.ClaimName]
if ok {
spec.Template.Spec.Volumes[i].PersistentVolumeClaim.ClaimName = indexName
}
} else if volume.VolumeSource.DataVolume != nil {
indexName, ok := dvNameMap[volume.VolumeSource.DataVolume.Name]
if ok {
spec.Template.Spec.Volumes[i].DataVolume.Name = indexName
}
} else if volume.VolumeSource.ConfigMap != nil && appendIndexToConfigMapRefs {
volume.VolumeSource.ConfigMap.Name += suffix
} else if volume.VolumeSource.Secret != nil && appendIndexToSecretRefs {
volume.VolumeSource.Secret.SecretName += suffix
} else if volume.VolumeSource.CloudInitNoCloud != nil && appendIndexToSecretRefs {
if volume.VolumeSource.CloudInitNoCloud.UserDataSecretRef != nil {
volume.CloudInitNoCloud.UserDataSecretRef.Name += suffix
}
if volume.VolumeSource.CloudInitNoCloud.NetworkDataSecretRef != nil {
volume.CloudInitNoCloud.NetworkDataSecretRef.Name += suffix
}
} else if volume.VolumeSource.CloudInitConfigDrive != nil && appendIndexToSecretRefs {
if volume.VolumeSource.CloudInitConfigDrive.UserDataSecretRef != nil {
volume.CloudInitConfigDrive.UserDataSecretRef.Name += suffix
}
if volume.VolumeSource.CloudInitConfigDrive.NetworkDataSecretRef != nil {
volume.CloudInitConfigDrive.NetworkDataSecretRef.Name += suffix
}
}
}
return spec
}
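// Worked example for indexVMSpec (hedged, assuming idx=2 and hypothetical
// names): a DataVolumeTemplate "disk" is renamed "disk-2" and any volume that
// references it as a DataVolume or PVC source is rewired to "disk-2". With
// AppendIndexToConfigMapRefs/AppendIndexToSecretRefs enabled, a ConfigMap ref
// "app-config" becomes "app-config-2" and a cloud-init UserDataSecretRef
// "seed" becomes "seed-2", so each pool member gets per-index resources.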
func injectPoolRevisionLabelsIntoVM(vm *virtv1.VirtualMachine, revisionName string) *virtv1.VirtualMachine {
if vm.Labels == nil {
vm.Labels = map[string]string{}
}
if vm.Spec.Template.ObjectMeta.Labels == nil {
vm.Spec.Template.ObjectMeta.Labels = map[string]string{}
}
vm.Labels[virtv1.VirtualMachinePoolRevisionName] = revisionName
vm.Spec.Template.ObjectMeta.Labels[virtv1.VirtualMachinePoolRevisionName] = revisionName
return vm
}
func getRevisionName(pool *poolv1.VirtualMachinePool) string {
return fmt.Sprintf("%s-%d", pool.Name, pool.Generation)
}
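// Example for getRevisionName: a pool named "web" at generation 4 maps to the
// ControllerRevision name "web-4", giving each observed generation of the
// pool spec its own revision object.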
func (c *Controller) ensureControllerRevision(pool *poolv1.VirtualMachinePool) (string, error) {
poolKey, err := controller.KeyFunc(pool)
if err != nil {
return "", err
}
revisionName := getRevisionName(pool)
_, alreadyExists, err := c.getControllerRevision(pool.Namespace, revisionName)
if err != nil {
return "", err
} else if alreadyExists {
// already created
return revisionName, nil
}
bytes, err := json.Marshal(&pool.Spec)
if err != nil {
return "", err
}
cr := &appsv1.ControllerRevision{
ObjectMeta: metav1.ObjectMeta{
Name: revisionName,
Namespace: pool.Namespace,
OwnerReferences: []metav1.OwnerReference{poolOwnerRef(pool)},
},
Data: runtime.RawExtension{Raw: bytes},
Revision: pool.ObjectMeta.Generation,
}
c.expectations.RaiseExpectations(poolKey, 1, 0)
_, err = c.clientset.AppsV1().ControllerRevisions(pool.Namespace).Create(context.Background(), cr, metav1.CreateOptions{})
if err != nil {
c.expectations.CreationObserved(poolKey)
return "", err
}
return cr.Name, nil
}
func (c *Controller) getControllerRevision(namespace, name string) (*poolv1.VirtualMachinePoolSpec, bool, error) {
key := controller.NamespacedKey(namespace, name)
storeObj, exists, err := c.revisionIndexer.GetByKey(key)
if !exists || err != nil {
return nil, false, err
}
cr, ok := storeObj.(*appsv1.ControllerRevision)
if !ok {
return nil, false, fmt.Errorf("unexpected resource %+v", storeObj)
}
spec := &poolv1.VirtualMachinePoolSpec{}
err = json.Unmarshal(cr.Data.Raw, spec)
if err != nil {
return nil, false, err
}
return spec, true, nil
}
func (c *Controller) scaleOut(pool *poolv1.VirtualMachinePool, count int) error {
var wg sync.WaitGroup
newNames := calculateNewVMNames(count, pool.Name, pool.Namespace, c.vmIndexer)
revisionName, err := c.ensureControllerRevision(pool)
if err != nil {
return err
}
log.Log.Object(pool).Infof("Adding %d VMs to pool", len(newNames))
poolKey, err := controller.KeyFunc(pool)
if err != nil {
return err
}
// Raise expectations for the VMs we are about to create
c.expectations.RaiseExpectations(poolKey, len(newNames), 0)
wg.Add(len(newNames))
errChan := make(chan error, len(newNames))
for _, name := range newNames {
go func(name string) {
defer wg.Done()
index, err := indexFromName(name)
if err != nil {
errChan <- err
return
}
vm := virtv1.NewVMReferenceFromNameWithNS(pool.Namespace, name)
vm.Labels = maps.Clone(pool.Spec.VirtualMachineTemplate.ObjectMeta.Labels)
vm.Annotations = maps.Clone(pool.Spec.VirtualMachineTemplate.ObjectMeta.Annotations)
vm.Spec = *indexVMSpec(&pool.Spec, index)
vm = injectPoolRevisionLabelsIntoVM(vm, revisionName)
controller.AddFinalizer(vm, poolv1.VirtualMachinePoolControllerFinalizer)
vm.ObjectMeta.OwnerReferences = []metav1.OwnerReference{poolOwnerRef(pool)}
vm, err = c.clientset.VirtualMachine(vm.Namespace).Create(context.Background(), vm, metav1.CreateOptions{})
if err != nil {
c.expectations.CreationObserved(poolKey)
log.Log.Object(pool).Reason(err).Errorf("Failed to add vm %s/%s to pool", pool.Namespace, name)
errChan <- err
return
}
c.recorder.Eventf(pool, k8score.EventTypeNormal, common.SuccessfulCreateVirtualMachineReason, "Created VM %s/%s", vm.Namespace, vm.ObjectMeta.Name)
log.Log.Object(pool).Infof("Adding vm %s/%s to pool", pool.Namespace, name)
}(name)
}
wg.Wait()
select {
case err := <-errChan:
// Only return the first error which occurred. We log the rest
c.recorder.Eventf(pool, k8score.EventTypeWarning, common.FailedCreateVirtualMachineReason, "Error creating VM: %v", err)
return err
default:
}
return nil
}
func (c *Controller) scale(pool *poolv1.VirtualMachinePool, vms []*virtv1.VirtualMachine) (common.SyncError, bool) {
diff := c.calcDiff(pool, vms)
if diff == 0 {
// a diff of 0 means the pool is at the desired replica count; still run
// opportunistic scale-in to clean up VMs that were deleted out of band
if err := c.opportunisticScaleIn(pool, vms, isStatePreservationEnabled(resolveOpportunisticScaleInStatePreservation(pool))); err != nil {
return common.NewSyncError(fmt.Errorf("error during opportunistic scale in: %v", err), FailedScaleInReason), false
}
return nil, true
}
maxDiff := int(math.Min(math.Abs(float64(diff)), float64(c.burstReplicas)))
if diff < 0 {
err := c.scaleOut(pool, maxDiff)
if err != nil {
return common.NewSyncError(fmt.Errorf("error during scale out: %v", err), FailedScaleOutReason), false
}
} else {
err := c.proactiveScaleIn(pool, vms, maxDiff)
if err != nil {
return common.NewSyncError(fmt.Errorf("error during scale in: %v", err), FailedScaleInReason), false
}
}
return nil, false
}
func isVMIReady(vmi *virtv1.VirtualMachineInstance) bool {
if vmi.DeletionTimestamp != nil || vmi.Status.Phase != virtv1.Running {
return false
}
return controller.NewVirtualMachineInstanceConditionManager().HasConditionWithStatus(vmi, virtv1.VirtualMachineInstanceReady, k8score.ConditionTrue)
}
func (c *Controller) getUnavailableVMICount(vms []*virtv1.VirtualMachine) (int, error) {
unavailableCount := 0
for _, vm := range vms {
obj, exists, err := c.vmiStore.GetByKey(controller.NamespacedKey(vm.Namespace, vm.Name))
if err != nil {
return 0, err
}
if !exists {
unavailableCount++
continue
}
vmi := obj.(*virtv1.VirtualMachineInstance)
if !isVMIReady(vmi) {
unavailableCount++
}
}
return unavailableCount, nil
}
func (c *Controller) handleUnhealthyVMIs(pool *poolv1.VirtualMachinePool, vms []*virtv1.VirtualMachine) error {
notReadyVMs := c.filterNotReadyVMs(vms)
for _, vm := range notReadyVMs {
obj, exists, err := c.vmiStore.GetByKey(controller.NamespacedKey(vm.Namespace, vm.Name))
if err != nil {
return err
}
if exists {
vmi := obj.(*virtv1.VirtualMachineInstance)
if vmi.DeletionTimestamp != nil {
continue
}
updateType, err := c.isOutdatedVMI(vm, vmi)
if err != nil {
return err
}
if err := c.handleResourceUpdate(pool, vm, vmi, updateType); err != nil {
return err
}
}
}
return nil
}
func (c *Controller) opportunisticUpdate(pool *poolv1.VirtualMachinePool, vmOutdatedList []*virtv1.VirtualMachine) error {
var wg sync.WaitGroup
if len(vmOutdatedList) == 0 {
return nil
}
revisionName, err := c.ensureControllerRevision(pool)
if err != nil {
return err
}
wg.Add(len(vmOutdatedList))
errChan := make(chan error, len(vmOutdatedList))
for i := 0; i < len(vmOutdatedList); i++ {
go func(idx int) {
defer wg.Done()
vm := vmOutdatedList[idx]
index, err := indexFromName(vm.Name)
if err != nil {
errChan <- err
return
}
vmCopy := vm.DeepCopy()
vmCopy.Labels = maps.Clone(pool.Spec.VirtualMachineTemplate.ObjectMeta.Labels)
vmCopy.Annotations = maps.Clone(pool.Spec.VirtualMachineTemplate.ObjectMeta.Annotations)
vmCopy.Spec = *indexVMSpec(&pool.Spec, index)
vmCopy = injectPoolRevisionLabelsIntoVM(vmCopy, revisionName)
_, err = c.clientset.VirtualMachine(vmCopy.Namespace).Update(context.Background(), vmCopy, metav1.UpdateOptions{})
if err != nil {
c.recorder.Eventf(pool, k8score.EventTypeWarning, FailedUpdateVirtualMachineReason, "Error updating virtual machine %s/%s: %v", vm.Name, vm.Namespace, err)
log.Log.Object(pool).Reason(err).Errorf("Error encountered during update of vm %s/%s in pool", vmCopy.Namespace, vmCopy.Name)
errChan <- err
return
}
log.Log.Object(pool).Infof("Updated vm %s/%s in pool", vmCopy.Namespace, vmCopy.Name)
c.recorder.Eventf(pool, k8score.EventTypeNormal, SuccessfulUpdateVirtualMachineReason, "Updated VM %s/%s", vm.Namespace, vm.Name)
}(i)
}
wg.Wait()
select {
case err := <-errChan:
// Only return the first error which occurred. We log the rest
return err
default:
}
return nil
}
func calculateMaxUnavailableInt(pool *poolv1.VirtualMachinePool) (int, error) {
maxUnavailable := intstr.FromString("100%")
if pool.Spec.MaxUnavailable != nil {
maxUnavailable = *pool.Spec.MaxUnavailable
}
totalReplicas := int32(1)
if pool.Spec.Replicas != nil {
totalReplicas = *pool.Spec.Replicas
}
maxUnavailableInt := 0
if maxUnavailable.Type == intstr.String {
percentage, err := strconv.ParseInt(strings.TrimSuffix(maxUnavailable.StrVal, "%"), 10, 32)
if err != nil {
return 0, fmt.Errorf("invalid maxUnavailable percentage: %v", err)
}
maxUnavailableInt = int(totalReplicas) * int(percentage) / 100
} else {
maxUnavailableInt = int(maxUnavailable.IntVal)
}
if maxUnavailableInt < 1 {
maxUnavailableInt = 1
}
return maxUnavailableInt, nil
}
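// Worked arithmetic for calculateMaxUnavailableInt: replicas=10 with
// maxUnavailable="25%" yields 10*25/100 = 2 VMIs that may be unavailable at
// once; replicas=3 with the same percentage computes 0 and is clamped to the
// minimum of 1 so proactive updates can still make progress. The default of
// "100%" permits updating all replicas simultaneously.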
func (c *Controller) proactiveUpdate(pool *poolv1.VirtualMachinePool, vmUpdatedList []*virtv1.VirtualMachine) error {
// Handle unhealthy VMIs first to rollover any changes to the VMI spec in case last update failed
if err := c.handleUnhealthyVMIs(pool, vmUpdatedList); err != nil {
return err
}
maxUnavailableInt, err := calculateMaxUnavailableInt(pool)
if err != nil {
return err
}
unavailableCount, err := c.getUnavailableVMICount(vmUpdatedList)
if err != nil {
return err
}
if pool.Spec.UpdateStrategy != nil && pool.Spec.UpdateStrategy.Proactive != nil {
sortPolicy := resolveSortPolicyForUpdate(pool.Spec.UpdateStrategy.Proactive)
sortVMsBasedOnSortPolicy(vmUpdatedList, sortPolicy)
}
maxUpdatable := maxUnavailableInt - unavailableCount
for i := range vmUpdatedList {
if maxUpdatable <= 0 {
log.Log.V(4).Infof("Delaying proactive update for pool %s/%s - max unavailable (%d) reached", pool.Namespace, pool.Name, maxUnavailableInt)
key, err := controller.KeyFunc(pool)
if err != nil {
return err
}
c.queue.AddAfter(key, defaultRetryDelay)
return nil
}
vm := vmUpdatedList[i]
obj, exists, err := c.vmiStore.GetByKey(controller.NamespacedKey(vm.Namespace, vm.Name))
if err != nil {
return err
}
if !exists {
continue
}
vmi := obj.(*virtv1.VirtualMachineInstance)
updateType, err := c.isOutdatedVMI(vm, vmi)
if err != nil {
return err
}
if updateType == proactiveUpdateTypeNone {
continue
}
if err := c.handleResourceUpdate(pool, vm, vmi, updateType); err != nil {
return err
}
maxUpdatable--
}
return nil
}
type proactiveUpdateType string
const (
// VMI spec has changed within the pool and requires a restart
proactiveUpdateTypeRestart proactiveUpdateType = "restart"
// VMI spec is identical in the current pool revision; only the revision label needs updating
proactiveUpdateTypePatchRevisionLabel proactiveUpdateType = "label-patch"
// VMI does not need an update
proactiveUpdateTypeNone proactiveUpdateType = "no-update"
// VM needs to be deleted due to data volume changes
proactiveUpdateTypeVMDelete proactiveUpdateType = "vm-delete"
)
func (c *Controller) isOutdatedVMI(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) (proactiveUpdateType, error) {
// This function compares the pool revision (pool spec at a specific point in time) synced
// to the VM vs the one used to create the VMI. By comparing the pool spec revisions between
// the VM and VMI we can determine if the VM has mutated in a way that should result
// in the VMI being updated. If the VMITemplate in these two pool revisions are not identical,
// the VMI needs to be updated via forced restart when proactive updates are in use.
//
// Rules for determining if a VMI is out of date or not
//
// 1. If the VM revision name doesn't exist, it's going to get set by the reconcile loop.
// The opportunistic update logic ensures the VM revision name will get set again
// on a future reconcile loop.
// 2. If the VMI revision name doesn't exist, the VMI has to be proactively restarted
// because we have no history of what revision was used to originate the VMI. The
// VM is an offline config we're comparing to, but the VMI is the active config.
// 3. Compare the VMI template in the pool revision associated with the VM to the one
// associated with the VMI. If they are identical in name or DeepEquals, then no
// proactive restart is required.
// 4. If the expected VMI template specs from the revisions are not identical in name, but
// are identical in DeepEquals, patch the VMI with the new revision name used on the vm.
// 5. If only the DataVolumeTemplates differ, the VM needs to be deleted to ensure proper data volume handling.
vmRevisionName, exists := vm.Labels[virtv1.VirtualMachinePoolRevisionName]
if !exists {
// If we can't detect the VM revision, treat the VMI as up to date for now.
// The VM revision will be set again by this controller on a future
// reconcile loop.
return proactiveUpdateTypeNone, nil
}
vmiRevisionName, exists := vmi.Labels[virtv1.VirtualMachinePoolRevisionName]
if !exists {
// If the VMI doesn't have the revision label, then it is outdated
log.Log.Infof("Marking vmi %s/%s for update due to missing revision label", vm.Namespace, vm.Name)
return proactiveUpdateTypeRestart, nil
}
if vmRevisionName == vmiRevisionName {
// no update required because revisions match
return proactiveUpdateTypeNone, nil
}
// Get the pool revision used to create the VM
poolSpecRevisionForVM, exists, err := c.getControllerRevision(vm.Namespace, vmRevisionName)
if err != nil {
return proactiveUpdateTypeNone, err
} else if !exists {
// if the revision associated with the pool can't be found, then
// no update is required at this time. The revision will eventually
// get created in a future reconcile loop and we'll be able to process the VMI.
return proactiveUpdateTypeNone, nil
}
expectedVMITemplate := poolSpecRevisionForVM.VirtualMachineTemplate.Spec.Template
expectedDataVolumeTemplates := poolSpecRevisionForVM.VirtualMachineTemplate.Spec.DataVolumeTemplates
// Get the pool revision used to create the VMI
poolSpecRevisionForVMI, exists, err := c.getControllerRevision(vm.Namespace, vmiRevisionName)
if err != nil {
return proactiveUpdateTypeRestart, err
} else if !exists {
// if the revision associated with the VMI can't be found, force a
// proactive restart
log.Log.Infof("Marking vmi %s/%s for update due to missing revision", vm.Namespace, vm.Name)
return proactiveUpdateTypeRestart, nil
}
currentVMITemplate := poolSpecRevisionForVMI.VirtualMachineTemplate.Spec.Template
currentDataVolumeTemplates := poolSpecRevisionForVMI.VirtualMachineTemplate.Spec.DataVolumeTemplates
// If DataVolumeTemplates differ, we need to delete the VM to ensure proper data volume handling
if !equality.Semantic.DeepEqual(currentDataVolumeTemplates, expectedDataVolumeTemplates) {
log.Log.Infof("Marking vm %s/%s for deletion due to data volume changes", vm.Namespace, vm.Name)
return proactiveUpdateTypeVMDelete, nil
}
// If the VMI templates differ between the revision used to create
// the VM and the revision used to create the VMI, then the VMI
// must be updated.
if !equality.Semantic.DeepEqual(currentVMITemplate, expectedVMITemplate) {
log.Log.Infof("Marking vmi %s/%s for update due out of sync spec", vm.Namespace, vm.Name)
return proactiveUpdateTypeRestart, nil
}
// If we get here, the vmi templates are identical, but the revision
// names are different, so patch the VMI with a new revision name.
return proactiveUpdateTypePatchRevisionLabel, nil
}
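// Illustrative decision table for isOutdatedVMI (hedged, hypothetical
// revision names): with the VM labeled revision "pool-3" and the VMI labeled
// "pool-2",
//
//	DataVolumeTemplates differ            -> proactiveUpdateTypeVMDelete
//	only the VMI template differs         -> proactiveUpdateTypeRestart
//	both deep-equal, only the names differ -> proactiveUpdateTypePatchRevisionLabel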
func (c *Controller) isOutdatedVM(pool *poolv1.VirtualMachinePool, vm *virtv1.VirtualMachine) (bool, error) {
if vm.Labels == nil {
log.Log.Object(pool).Infof("Marking vm %s/%s for update due to missing labels ", vm.Namespace, vm.Name)
return true, nil
}
revisionName, exists := vm.Labels[virtv1.VirtualMachinePoolRevisionName]
if !exists {
log.Log.Object(pool).Infof("Marking vm %s/%s for update due to missing revision labels ", vm.Namespace, vm.Name)
return true, nil
}
oldPoolSpec, exists, err := c.getControllerRevision(pool.Namespace, revisionName)
if err != nil {
return true, err
} else if !exists {
log.Log.Object(pool).Infof("Marking vm %s/%s for update due to missing revision", vm.Namespace, vm.Name)
return true, nil
}
if !equality.Semantic.DeepEqual(oldPoolSpec.VirtualMachineTemplate, pool.Spec.VirtualMachineTemplate) {
log.Log.Object(pool).Infof("Marking vm %s/%s for update due out of date spec", vm.Namespace, vm.Name)
return true, nil
}
return false, nil
}
func (c *Controller) pruneUnusedRevisions(pool *poolv1.VirtualMachinePool, vms []*virtv1.VirtualMachine) common.SyncError {
keys, err := c.revisionIndexer.IndexKeys("vmpool", string(pool.UID))
if err != nil {
return common.NewSyncError(fmt.Errorf("error while pruning vmpool revisions: %v", err), FailedRevisionPruningReason)
}
deletionMap := make(map[string]interface{})
for _, key := range keys {
strs := strings.Split(key, "/")
if len(strs) != 2 {
continue
}
deletionMap[strs[1]] = nil
}
for _, vm := range vms {
// Check to see what revision is used by the VM, and remove
// that from the revision prune list
revisionName, exists := vm.Labels[virtv1.VirtualMachinePoolRevisionName]
if exists {
// remove from deletionMap since we found a VM that references this revision
delete(deletionMap, revisionName)
}
// Check to see what revision is used by the VMI, and remove
// that from the revision prune list
vmiKey := controller.NamespacedKey(vm.Namespace, vm.Name)
obj, exists, _ := c.vmiStore.GetByKey(vmiKey)
if exists {
vmi := obj.(*virtv1.VirtualMachineInstance)
revisionName, exists = vmi.Labels[virtv1.VirtualMachinePoolRevisionName]
if exists {
// remove from deletionMap since we found a VMI that references this revision
delete(deletionMap, revisionName)
}
}
}
for revisionName := range deletionMap {
err := c.clientset.AppsV1().ControllerRevisions(pool.Namespace).Delete(context.Background(), revisionName, metav1.DeleteOptions{})
if err != nil {
return common.NewSyncError(fmt.Errorf("error while pruning vmpool revisions: %v", err), FailedRevisionPruningReason)
}
}
return nil
}
func (c *Controller) update(pool *poolv1.VirtualMachinePool, vms []*virtv1.VirtualMachine) (common.SyncError, bool) {
if pool.Spec.UpdateStrategy != nil && pool.Spec.UpdateStrategy.Unmanaged != nil {
log.Log.V(4).Infof("unmanaged update strategy is set, skipping update: updating VMs/VMIs is not allowed")
return nil, true
}
var err error
filteredVms := slices.Clone(vms)
if hasSelectorsSelectionPolicyForUpdate(pool) {
filteredVms, err = filterVMsBasedOnSelectors(vms, pool.Spec.UpdateStrategy.Proactive.SelectionPolicy.Selectors)
if err != nil {
return common.NewSyncError(fmt.Errorf("failed to filter VMs based on ordered policies: %v", err), FailedUpdateReason), false
}
}
// List of VMs that need to be updated
var vmOutdatedList []*virtv1.VirtualMachine
// List of VMs that are up-to-date that need to be checked to see if VMI is up-to-date
var vmUpdatedList []*virtv1.VirtualMachine
for _, vm := range filteredVms {
outdated, err := c.isOutdatedVM(pool, vm)
if err != nil {
return common.NewSyncError(fmt.Errorf("error while detecting outdated VMs: %v", err), FailedUpdateReason), false
}
if outdated {
vmOutdatedList = append(vmOutdatedList, vm)
} else {
vmUpdatedList = append(vmUpdatedList, vm)
}
}
// Always perform opportunistic updates
if err = c.opportunisticUpdate(pool, vmOutdatedList); err != nil {
return common.NewSyncError(fmt.Errorf("error during VM update: %v", err), FailedUpdateReason), false
}
// Perform proactive updates only if not in opportunistic mode
if !isOpportunisticUpdate(pool) {
if err = c.proactiveUpdate(pool, vmUpdatedList); err != nil {
return common.NewSyncError(fmt.Errorf("error during VMI update: %v", err), FailedUpdateReason), false
}
}
vmUpdateStable := len(vmOutdatedList) == 0
return nil, vmUpdateStable
}
// Execute processes one key from the controller queue, requeuing it with
// rate limiting if an error occurs. It returns false once the queue has
// been shut down.
func (c *Controller) Execute() bool {
key, quit := c.queue.Get()
if quit {
return false
}
defer c.queue.Done(key)
virtControllerPoolWorkQueueTracer.StartTrace(key, "virt-controller VMPool workqueue", trace.Field{Key: "Workqueue Key", Value: key})
defer virtControllerPoolWorkQueueTracer.StopTrace(key)
err := c.execute(key)
if err != nil {
log.Log.Reason(err).Infof("reenqueuing pool %v", key)
c.queue.AddRateLimited(key)
} else {
log.Log.V(4).Infof("processed pool %v", key)
c.queue.Forget(key)
}
return true
}
func (c *Controller) updateStatus(origPool *poolv1.VirtualMachinePool, vms []*virtv1.VirtualMachine, syncErr common.SyncError) error {
key, err := controller.KeyFunc(origPool)
if err != nil {
return err
}
defer virtControllerPoolWorkQueueTracer.StepTrace(key, "updateStatus", trace.Field{Key: "VMPool Name", Value: origPool.Name})
pool := origPool.DeepCopy()
labelSelector, err := metav1.LabelSelectorAsSelector(pool.Spec.Selector)
if err != nil {
return err
}
pool.Status.LabelSelector = labelSelector.String()
cm := controller.NewVirtualMachinePoolConditionManager()
if syncErr != nil && !cm.HasCondition(pool, poolv1.VirtualMachinePoolReplicaFailure) {
cm.UpdateCondition(pool,
&poolv1.VirtualMachinePoolCondition{
Type: poolv1.VirtualMachinePoolReplicaFailure,
Reason: syncErr.Reason(),
Message: syncErr.Error(),
LastTransitionTime: metav1.Now(),
Status: k8score.ConditionTrue,
})
c.recorder.Eventf(pool, k8score.EventTypeWarning, syncErr.Reason(), syncErr.Error())
} else if syncErr == nil && cm.HasCondition(pool, poolv1.VirtualMachinePoolReplicaFailure) {
cm.RemoveCondition(pool, poolv1.VirtualMachinePoolReplicaFailure)
}
if pool.Spec.Paused && !cm.HasCondition(pool, poolv1.VirtualMachinePoolReplicaPaused) {
cm.UpdateCondition(pool,
&poolv1.VirtualMachinePoolCondition{
Type: poolv1.VirtualMachinePoolReplicaPaused,
Reason: SuccessfulPausedPoolReason,
Message: "Pool controller is paused",
LastTransitionTime: metav1.Now(),
Status: k8score.ConditionTrue,
})
c.recorder.Eventf(pool, k8score.EventTypeNormal, SuccessfulPausedPoolReason, "Pool is paused")
} else if !pool.Spec.Paused && cm.HasCondition(pool, poolv1.VirtualMachinePoolReplicaPaused) {
cm.RemoveCondition(pool, poolv1.VirtualMachinePoolReplicaPaused)
c.recorder.Eventf(pool, k8score.EventTypeNormal, SuccessfulResumePoolReason, "Pool is unpaused")
}
pool.Status.Replicas = int32(len(vms))
pool.Status.ReadyReplicas = int32(len(c.filterReadyVMs(vms)))
if !equality.Semantic.DeepEqual(pool.Status, origPool.Status) || pool.Status.Replicas != pool.Status.ReadyReplicas {
_, err := c.clientset.VirtualMachinePool(pool.Namespace).UpdateStatus(context.Background(), pool, metav1.UpdateOptions{})
if err != nil {
return err
}
}
return nil
}
func (c *Controller) execute(key string) error {
logger := log.DefaultLogger()
var syncErr common.SyncError
obj, poolExists, err := c.poolIndexer.GetByKey(key)
if err != nil {
return err
}
var pool *poolv1.VirtualMachinePool
if poolExists {
pool = obj.(*poolv1.VirtualMachinePool)
logger = logger.Object(pool)
} else {
c.expectations.DeleteExpectations(key)
return nil
}
selector, err := metav1.LabelSelectorAsSelector(pool.Spec.Selector)
if err != nil {
logger.Reason(err).Error("Invalid selector on pool, will not re-enqueue.")
return nil
}
if !selector.Matches(labels.Set(pool.Spec.VirtualMachineTemplate.ObjectMeta.Labels)) {
logger.Reason(err).Error("Selector does not match template labels, will not re-enqueue.")
return nil
}
vms, err := c.listVMsFromNamespace(pool.ObjectMeta.Namespace)
if err != nil {
logger.Reason(err).Error("Failed to fetch vms for namespace from cache.")
return err
}
if isAutohealingEnabled(pool) {
if err := c.autoHealFailingVMs(pool, vms); err != nil {
logger.Reason(err).Error("Failed to auto heal failing vms.")
return err
}
}
// If any adoptions are attempted, we should first recheck for deletion with
// an uncached quorum read sometime after listing VirtualMachines (see kubernetes/kubernetes#42639).
canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
fresh, err := c.clientset.VirtualMachinePool(pool.ObjectMeta.Namespace).Get(context.Background(), pool.ObjectMeta.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if fresh.ObjectMeta.UID != pool.ObjectMeta.UID {
return nil, fmt.Errorf("original Pool %v/%v is gone: got uid %v, wanted %v", pool.Namespace, pool.Name, fresh.UID, pool.UID)
}
return fresh, nil
})
cm := controller.NewVirtualMachineControllerRefManager(controller.RealVirtualMachineControl{Clientset: c.clientset}, pool, selector, virtv1.VirtualMachineInstanceReplicaSetGroupVersionKind, canAdoptFunc)
vms, err = cm.ReleaseDetachedVirtualMachines(vms)
if err != nil {
return err
}
if pool.DeletionTimestamp == nil {
if err := c.addPoolFinalizer(pool); err != nil {
return err
}
}
needsSync := c.expectations.SatisfiedExpectations(key)
if needsSync && !pool.Spec.Paused && pool.DeletionTimestamp == nil {
scaleIsStable := false
updateIsStable := false
syncErr, scaleIsStable = c.scale(pool, vms)
if syncErr != nil {
logger.Reason(err).Error("Scaling the pool failed.")
}
needsSync = c.expectations.SatisfiedExpectations(key)
if needsSync && scaleIsStable && syncErr == nil {
// Handle updates after scale operations are satisfied.
syncErr, updateIsStable = c.update(pool, vms)
}
needsSync = c.expectations.SatisfiedExpectations(key)
if needsSync && syncErr == nil && scaleIsStable && updateIsStable {
// handle pruning revisions after scale and update operations are satisfied
syncErr = c.pruneUnusedRevisions(pool, vms)
}
virtControllerPoolWorkQueueTracer.StepTrace(key, "sync", trace.Field{Key: "VMPool Name", Value: pool.Name})
} else if pool.DeletionTimestamp != nil {
if err := c.handlePoolDeletion(pool); err != nil {
return err
}
syncErr = c.pruneUnusedRevisions(pool, vms)
}
err = c.updateStatus(pool, vms, syncErr)
if err != nil {
return err
}
return syncErr
}
func (c *Controller) handleResourceUpdate(pool *poolv1.VirtualMachinePool, vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance, updateType proactiveUpdateType) error {
var err error
switch updateType {
case proactiveUpdateTypeRestart:
err = c.clientset.VirtualMachineInstance(vm.ObjectMeta.Namespace).Delete(context.Background(), vmi.ObjectMeta.Name, metav1.DeleteOptions{})
log.Log.Object(pool).Infof("Proactive update of VM %s/%s by deleting outdated VMI", vm.Namespace, vm.Name)
case proactiveUpdateTypeVMDelete:
err = c.clientset.VirtualMachine(vm.Namespace).Delete(context.Background(), vm.Name, metav1.DeleteOptions{PropagationPolicy: pointer.P(metav1.DeletePropagationForeground)})
log.Log.Object(pool).Infof("Proactive update of VM %s/%s by deleting VM due to data volume changes", vm.Namespace, vm.Name)
case proactiveUpdateTypePatchRevisionLabel:
patchSet := patch.New()
vmiLabels := maps.Clone(vmi.Labels)
if vmiLabels == nil {
vmiLabels = make(map[string]string)
}
revisionName, exists := vm.Labels[virtv1.VirtualMachinePoolRevisionName]
if !exists {
return nil
}
vmiLabels[virtv1.VirtualMachinePoolRevisionName] = revisionName
if vmi.Labels == nil {
patchSet.AddOption(patch.WithAdd("/metadata/labels", vmiLabels))
} else {
patchSet.AddOption(
patch.WithTest("/metadata/labels", vmi.Labels),
patch.WithReplace("/metadata/labels", vmiLabels),
)
}
patchBytes, err := patchSet.GeneratePayload()
if err != nil {
return fmt.Errorf("failed to marshal patch: %v", err)
}
_, err = c.clientset.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("patching of vmi labels with new pool revision name: %v", err)
}
log.Log.Object(pool).Infof("Proactive update of VM %s/%s in pool via label patch", vm.Namespace, vm.Name)
}
if err != nil {
c.recorder.Eventf(pool, k8score.EventTypeWarning, FailedUpdateVirtualMachineReason, "Error updating resource %s/%s: %v", vm.Namespace, vm.Name, err)
return err
}
c.recorder.Eventf(pool, k8score.EventTypeNormal, common.SuccessfulDeleteVirtualMachineReason, "Successfully updated resource %s/%s", vm.Namespace, vm.Name)
return nil
}
func patchFinalizer(oldFinalizers, newFinalizers []string) ([]byte, error) {
return patch.New(
patch.WithTest("/metadata/finalizers", oldFinalizers),
patch.WithReplace("/metadata/finalizers", newFinalizers)).
GeneratePayload()
}
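// Sketch of the payload patchFinalizer emits (assuming GeneratePayload
// marshals the operations in order, as the patch package's operation
// marshaling suggests):
//
//	[{"op":"test","path":"/metadata/finalizers","value":<oldFinalizers>},
//	 {"op":"replace","path":"/metadata/finalizers","value":<newFinalizers>}]
//
// The leading "test" makes the replace conditional: the API server rejects
// the patch if the finalizers changed since they were read, preventing lost
// updates.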
func removeFinalizerFromList(origFinalizers []string, finalizer string) []string {
var filtered []string
for _, f := range origFinalizers {
if f != finalizer {
filtered = append(filtered, f)
}
}
return filtered
}
func (c *Controller) removeFinalizer(vm *virtv1.VirtualMachine) error {
if !controller.HasFinalizer(vm, poolv1.VirtualMachinePoolControllerFinalizer) {
return nil
}
newFinalizers := removeFinalizerFromList(vm.Finalizers, poolv1.VirtualMachinePoolControllerFinalizer)
patch, err := patchFinalizer(vm.Finalizers, newFinalizers)
if err != nil {
return err
}
_, err = c.clientset.VirtualMachine(vm.Namespace).Patch(context.Background(), vm.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
if k8serrors.IsNotFound(err) {
return nil
}
return err
}
func (c *Controller) addPoolFinalizer(pool *poolv1.VirtualMachinePool) error {
if controller.HasFinalizer(pool, poolv1.VirtualMachinePoolControllerFinalizer) {
return nil
}
// Clone before appending so pool.Finalizers, which backs the JSON-patch
// test operation below, is not mutated. (A copy into a zero-length slice
// would silently drop the existing finalizers.)
newFinalizers := append(slices.Clone(pool.Finalizers), poolv1.VirtualMachinePoolControllerFinalizer)
patch, err := patchFinalizer(pool.Finalizers, newFinalizers)
if err != nil {
log.Log.Object(pool).Errorf("Failed to marshal patch: %v", err)
return err
}
_, err = c.clientset.VirtualMachinePool(pool.Namespace).Patch(context.Background(), pool.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
if k8serrors.IsNotFound(err) {
return nil
}
return err
}
func (c *Controller) removePoolFinalizer(pool *poolv1.VirtualMachinePool) error {
if !controller.HasFinalizer(pool, poolv1.VirtualMachinePoolControllerFinalizer) {
return nil
}
newFinalizers := removeFinalizerFromList(pool.Finalizers, poolv1.VirtualMachinePoolControllerFinalizer)
patch, err := patchFinalizer(pool.Finalizers, newFinalizers)
if err != nil {
log.Log.Object(pool).Errorf("Failed to marshal patch: %v", err)
return err
}
_, err = c.clientset.VirtualMachinePool(pool.Namespace).Patch(context.Background(), pool.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
if k8serrors.IsNotFound(err) {
return nil
}
return err
}
func (c *Controller) cleanupVMs(vms []*virtv1.VirtualMachine) error {
var lastErr error
for _, vm := range vms {
if err := c.removeFinalizer(vm); err != nil {
log.Log.Object(vm).Errorf("Failed to remove finalizer: %v", err)
lastErr = err
}
}
return lastErr
}
func (c *Controller) handlePoolDeletion(pool *poolv1.VirtualMachinePool) error {
vms, err := c.listVMsFromNamespace(pool.ObjectMeta.Namespace)
if err != nil {
return err
}
var vmsToClean []*virtv1.VirtualMachine
for _, vm := range vms {
selector, err := metav1.LabelSelectorAsSelector(pool.Spec.Selector)
if err != nil {
return err
}
if !selector.Matches(labels.Set(vm.Labels)) {
continue
}
if !controller.HasFinalizer(vm, poolv1.VirtualMachinePoolControllerFinalizer) {
continue
}
vmsToClean = append(vmsToClean, vm)
}
if err := c.cleanupVMs(vmsToClean); err != nil {
return err
}
if err := c.removePoolFinalizer(pool); err != nil {
return err
}
return nil
}
func (c *Controller) statePreservationCleanupforVM(pool *poolv1.VirtualMachinePool, vm *virtv1.VirtualMachine, preserveState bool) error {
if preserveState {
if err := c.removePVCOwnerReferences(vm); err != nil {
c.recorder.Eventf(pool, k8score.EventTypeWarning, FailedUpdateVirtualMachineReason, "Error removing PVC owner references for VM %s/%s: %v", vm.Namespace, vm.Name, err)
return err
}
if err := c.removeDataVolumeOwnerReferences(vm); err != nil {
c.recorder.Eventf(pool, k8score.EventTypeWarning, FailedUpdateVirtualMachineReason, "Error removing DataVolume owner references for VM %s/%s: %v", vm.Namespace, vm.Name, err)
return err
}
}
if err := c.removeFinalizer(vm); err != nil {
c.recorder.Eventf(pool, k8score.EventTypeWarning, FailedUpdateVirtualMachineReason, "Error removing finalizer for VM %s/%s: %v", vm.Namespace, vm.Name, err)
return err
}
log.Log.Object(vm).Infof("Removing VM %s/%s from pool", vm.Namespace, vm.Name)
return nil
}
func (c *Controller) removeDataVolumeOwnerReferences(vm *virtv1.VirtualMachine) error {
log.Log.Object(vm).Infof("Removing DataVolume owner references for VM %s/%s", vm.Namespace, vm.Name)
dvNames := make(map[string]bool)
// Get DataVolume names from DataVolumeTemplates
for _, dvTemplate := range vm.Spec.DataVolumeTemplates {
dvNames[dvTemplate.Name] = true
}
// Get existing DataVolume names directly referenced in volumes
for _, volume := range vm.Spec.Template.Spec.Volumes {
if volume.DataVolume != nil {
dvNames[volume.DataVolume.Name] = true
}
}
for dvName := range dvNames {
key := controller.NamespacedKey(vm.Namespace, dvName)
obj, exists, err := c.dvStore.GetByKey(key)
if err != nil {
return fmt.Errorf("failed to get DataVolume %s from store: %v", dvName, err)
}
if !exists {
log.Log.Object(vm).Warningf("DataVolume %s does not exist", dvName)
continue
}
dv, ok := obj.(*cdiv1.DataVolume)
if !ok {
continue
}
if metav1.IsControlledBy(dv, vm) {
patchBytes, err := patchOwnerReferences(dv, vm)
if err != nil {
return fmt.Errorf("failed to patch owner references for DataVolume %s: %v", dv.Name, err)
}
_, err = c.clientset.CdiClient().CdiV1beta1().DataVolumes(vm.Namespace).Patch(context.Background(), dv.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("failed to remove owner reference from DataVolume %s: %v", dv.Name, err)
}
}
}
return nil
}
func (c *Controller) removePVCOwnerReferences(vm *virtv1.VirtualMachine) error {
log.Log.Object(vm).Infof("Removing PVC owner references for VM %s/%s", vm.Namespace, vm.Name)
pvcNames := make(map[string]bool)
// Get PVCs from DataVolumeTemplates (these become PVCs via DVs)
for _, dvTemplate := range vm.Spec.DataVolumeTemplates {
pvcNames[dvTemplate.Name] = true
}
// Get existing PVC names directly referenced in volumes
for _, volume := range vm.Spec.Template.Spec.Volumes {
if volume.PersistentVolumeClaim != nil {
pvcNames[volume.PersistentVolumeClaim.ClaimName] = true
}
}
for pvcName := range pvcNames {
key := controller.NamespacedKey(vm.Namespace, pvcName)
obj, exists, err := c.pvcStore.GetByKey(key)
if err != nil {
return fmt.Errorf("failed to get PVC %s from store: %v", pvcName, err)
}
if !exists {
log.Log.Object(vm).Warningf("PVC %s does not exist", pvcName)
continue
}
pvc, ok := obj.(*k8score.PersistentVolumeClaim)
if !ok {
continue
}
if metav1.IsControlledBy(pvc, vm) {
patchBytes, err := patchOwnerReferences(pvc, vm)
if err != nil {
return fmt.Errorf("failed to patch owner references for PVC %s: %v", pvc.Name, err)
}
_, err = c.clientset.CoreV1().PersistentVolumeClaims(vm.Namespace).Patch(context.Background(), pvcName, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("failed to remove owner reference from PVC %s: %v", pvc.Name, err)
}
}
}
return nil
}
func patchOwnerReferences(obj metav1.Object, vm *virtv1.VirtualMachine) ([]byte, error) {
newOwnerRefs := make([]metav1.OwnerReference, 0, len(obj.GetOwnerReferences()))
for _, ownerRef := range obj.GetOwnerReferences() {
if ownerRef.UID != vm.UID {
newOwnerRefs = append(newOwnerRefs, ownerRef)
}
}
return patch.New(
patch.WithTest("/metadata/ownerReferences", obj.GetOwnerReferences()),
patch.WithReplace("/metadata/ownerReferences", newOwnerRefs)).GeneratePayload()
}
func isUnmanaged(pool *poolv1.VirtualMachinePool) bool {
return pool.Spec.ScaleInStrategy != nil &&
pool.Spec.ScaleInStrategy.Unmanaged != nil
}
func isOpportunisticScaleInEnabled(pool *poolv1.VirtualMachinePool) bool {
return pool.Spec.ScaleInStrategy != nil &&
pool.Spec.ScaleInStrategy.Opportunistic != nil
}
func hasSelectorsSelectionPolicyForUpdate(pool *poolv1.VirtualMachinePool) bool {
if pool.Spec.UpdateStrategy == nil || pool.Spec.UpdateStrategy.Proactive == nil || pool.Spec.UpdateStrategy.Proactive.SelectionPolicy == nil || pool.Spec.UpdateStrategy.Proactive.SelectionPolicy.Selectors == nil {
return false
}
return true
}
func hasSelectorsSelectionPolicyForScaleIn(pool *poolv1.VirtualMachinePool) bool {
if pool.Spec.ScaleInStrategy == nil || pool.Spec.ScaleInStrategy.Proactive == nil || pool.Spec.ScaleInStrategy.Proactive.SelectionPolicy == nil || pool.Spec.ScaleInStrategy.Proactive.SelectionPolicy.Selectors == nil {
return false
}
return true
}
func resolveOpportunisticScaleInStatePreservation(pool *poolv1.VirtualMachinePool) poolv1.StatePreservation {
if pool.Spec.ScaleInStrategy == nil || pool.Spec.ScaleInStrategy.Opportunistic == nil || pool.Spec.ScaleInStrategy.Opportunistic.StatePreservation == nil {
return poolv1.StatePreservationDisabled
}
return *pool.Spec.ScaleInStrategy.Opportunistic.StatePreservation
}
func resolveProactiveScaleInStatePreservation(pool *poolv1.VirtualMachinePool) poolv1.StatePreservation {
if pool.Spec.ScaleInStrategy == nil || pool.Spec.ScaleInStrategy.Proactive == nil || pool.Spec.ScaleInStrategy.Proactive.StatePreservation == nil {
return poolv1.StatePreservationDisabled
}
return *pool.Spec.ScaleInStrategy.Proactive.StatePreservation
}
func isStatePreservationEnabled(statePreservation poolv1.StatePreservation) bool {
return statePreservation != poolv1.StatePreservationDisabled
}
func isOpportunisticUpdate(pool *poolv1.VirtualMachinePool) bool {
return pool.Spec.UpdateStrategy != nil && pool.Spec.UpdateStrategy.Opportunistic != nil
}
func isAutohealingEnabled(pool *poolv1.VirtualMachinePool) bool {
return pool.Spec.Autohealing != nil
}
func (c *Controller) autoHealFailingVMs(pool *poolv1.VirtualMachinePool, vms []*virtv1.VirtualMachine) error {
vmsToCleanup := filterFailingVMsToStart(vms, pool.Spec.Autohealing)
return c.scaleIn(pool, vmsToCleanup, len(vmsToCleanup))
}
func filterFailingVMsToStart(vms []*virtv1.VirtualMachine, autohealing *poolv1.VirtualMachinePoolAutohealingStrategy) []*virtv1.VirtualMachine {
var filtered []*virtv1.VirtualMachine
for _, vm := range vms {
// Check for consecutive VMI start failures (tracked in Status.StartFailure)
if vm.Status.StartFailure != nil && vm.Status.StartFailure.ConsecutiveFailCount >= getFailureToStartThreshold(autohealing) {
filtered = append(filtered, vm)
continue
}
// Check for status-based failures (CrashLoopBackOff, Unschedulable, etc.)
if shouldAutohealBasedOnStatus(vm, autohealing) {
filtered = append(filtered, vm)
}
}
return filtered
}
// shouldAutohealBasedOnStatus checks if a VM's PrintableStatus indicates it should be autohealed
func shouldAutohealBasedOnStatus(vm *virtv1.VirtualMachine, autohealing *poolv1.VirtualMachinePoolAutohealingStrategy) bool {
switch vm.Status.PrintableStatus {
case virtv1.VirtualMachineStatusCrashLoopBackOff,
virtv1.VirtualMachineStatusUnschedulable,
virtv1.VirtualMachineStatusDataVolumeError,
virtv1.VirtualMachineStatusPvcNotFound,
virtv1.VirtualMachineStatusErrImagePull,
virtv1.VirtualMachineStatusImagePullBackOff:
return hasVMBeenFailingLongEnough(vm, autohealing)
default:
return false
}
}
// hasVMBeenFailingLongEnough checks if VM has not been ready for minimum duration
func hasVMBeenFailingLongEnough(vm *virtv1.VirtualMachine, autohealing *poolv1.VirtualMachinePoolAutohealingStrategy) bool {
condManager := controller.NewVirtualMachineConditionManager()
if c := condManager.GetCondition(vm, virtv1.VirtualMachineReady); c != nil && c.Status == k8score.ConditionFalse {
failingSince := c.LastProbeTime.Time
if time.Since(failingSince) >= getMinFailingToStartDuration(autohealing) {
log.Log.Object(vm).Infof("VM %s/%s has been failing to start for %v, adding to list", vm.Namespace, vm.Name, time.Since(failingSince))
return true
}
}
return false
}
func getFailureToStartThreshold(autohealing *poolv1.VirtualMachinePoolAutohealingStrategy) int {
if autohealing.StartUpFailureThreshold == nil {
return defaultStartUpFailureThreshold
}
return int(*autohealing.StartUpFailureThreshold)
}
func getMinFailingToStartDuration(autohealing *poolv1.VirtualMachinePoolAutohealingStrategy) time.Duration {
if autohealing.MinFailingToStartDuration == nil {
return minFailingToStartDuration
}
return autohealing.MinFailingToStartDuration.Duration
}
// nodeSelectorRequirementsAsSelector converts a []NodeSelectorRequirement into
// a labels.Selector.
func nodeSelectorRequirementsAsSelector(nsm *[]k8score.NodeSelectorRequirement) (labels.Selector, error) {
if nsm == nil {
return labels.Nothing(), nil
}
selector := labels.NewSelector()
for _, expr := range *nsm {
var op selection.Operator
switch expr.Operator {
case k8score.NodeSelectorOpIn:
op = selection.In
case k8score.NodeSelectorOpNotIn:
op = selection.NotIn
case k8score.NodeSelectorOpExists:
op = selection.Exists
case k8score.NodeSelectorOpDoesNotExist:
op = selection.DoesNotExist
case k8score.NodeSelectorOpGt:
op = selection.GreaterThan
case k8score.NodeSelectorOpLt:
op = selection.LessThan
default:
return nil, fmt.Errorf("%q is not a valid label selector operator", expr.Operator)
}
r, err := labels.NewRequirement(expr.Key, op, expr.Values)
if err != nil {
return nil, err
}
selector = selector.Add(*r)
}
return selector, nil
}
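// Hedged example: the requirement {Key: "kubernetes.io/hostname", Operator:
// In, Values: ["node-a", "node-b"]} converts to a selector equivalent to
// "kubernetes.io/hostname in (node-a,node-b)", which the scale-in and update
// filters above match against a VM template's spec.nodeSelector map.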
// Copyright 2025 the cncf-fuzzing authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package pool
import (
"k8s.io/client-go/util/workqueue"
"kubevirt.io/kubevirt/pkg/testutils"
)
// These utils are needed for the fuzzer
func ShutdownCtrlQueue(ctrl *Controller) {
ctrl.queue.ShutDown()
}
func SetQueue(ctrl *Controller, newQueue *testutils.MockWorkQueue[string]) {
ctrl.queue = newQueue
}
func GetQueue(ctrl *Controller) workqueue.TypedRateLimitingInterface[string] {
return ctrl.queue
}
// Copyright 2025 the cncf-fuzzing authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package testutils
import (
"fmt"
gofuzzheaders "github.com/AdaLogics/go-fuzz-headers"
k8sv1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
clonev1 "kubevirt.io/api/clone/v1beta1"
virtv1 "kubevirt.io/api/core/v1"
poolv1 "kubevirt.io/api/pool/v1beta1"
snapshotv1 "kubevirt.io/api/snapshot/v1beta1"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)
const (
validChars = "abcdefghijklmnopqrstuvwxyz0123456789"
validStartChars = "abcdefghijklmnopqrstuvwxyz"
validLabelChars = "abcdefghijklmnopqrstuvwxyz0123456789-_."
minNameLength = 5
maxNameLength = 63
minLabelLength = 3
maxLabelLength = 63
)
// generateValidK8sName generates a DNS-1123 subdomain compliant name
// DNS-1123 subdomain must consist of lowercase alphanumeric characters, '-' or '.',
// and must start and end with an alphanumeric character
func generateValidK8sName(c gofuzzheaders.Continue, prefix string) string {
length := minNameLength + c.Intn(maxNameLength-minNameLength)
// Ensure length is positive
if length <= 0 {
length = minNameLength
}
// If the prefix is already at least the desired length, return it truncated
if len(prefix) >= length {
return prefix[:length]
}
// Start with prefix and a valid start character
name := prefix + "-"
startIdx := c.Intn(len(validStartChars))
if startIdx < 0 || startIdx >= len(validStartChars) {
startIdx = 0
}
name += string(validStartChars[startIdx])
// Calculate remaining length, reserving one slot for the final character added below
remainingLength := length - len(prefix) - 3
if remainingLength < 0 {
remainingLength = 0
}
// Add random valid characters
for i := 0; i < remainingLength; i++ {
idx := c.Intn(len(validChars))
if idx < 0 || idx >= len(validChars) {
idx = 0
}
name += string(validChars[idx])
}
// Only add final character if we have room
if len(name) < length {
endIdx := c.Intn(len(validChars))
if endIdx < 0 || endIdx >= len(validChars) {
endIdx = 0
}
name += string(validChars[endIdx])
}
return name
}
// generateValidK8sLabel generates a valid Kubernetes label value
// Labels can be empty or consist of alphanumeric, '-', '_', '.'
// and must start and end with alphanumeric
func generateValidK8sLabel(c gofuzzheaders.Continue, prefix string) string {
// Sometimes generate empty label (valid)
if c.Intn(10) == 0 {
return ""
}
length := minLabelLength + c.Intn(maxLabelLength-minLabelLength)
// Ensure length is positive
if length <= 0 {
length = minLabelLength
}
// If prefix is already longer than or equal to the desired length, return it truncated.
// The branch condition guarantees 0 < length <= len(prefix), so the slice is in bounds.
if len(prefix) >= length {
return prefix[:length]
}
// Start with valid character
label := prefix
if len(prefix) == 0 {
startIdx := c.Intn(len(validStartChars))
if startIdx < 0 || startIdx >= len(validStartChars) {
startIdx = 0
}
label = string(validStartChars[startIdx])
}
// Calculate remaining length needed, accounting for the final character we'll add
remainingLength := length - len(label) - 1
if remainingLength < 0 {
remainingLength = 0
}
// Add random valid characters
for i := 0; i < remainingLength; i++ {
idx := c.Intn(len(validLabelChars))
if idx < 0 || idx >= len(validLabelChars) {
idx = 0
}
label += string(validLabelChars[idx])
}
// Only add final character if we have room
if len(label) < length {
endIdx := c.Intn(len(validChars))
if endIdx < 0 || endIdx >= len(validChars) {
endIdx = 0
}
label += string(validChars[endIdx])
}
return label
}
// CustomObjectMetaFuzzer creates a custom fuzzer for ObjectMeta that generates
// valid DNS-1123 compliant names and labels
func CustomObjectMetaFuzzer(namespace string) func(*metav1.ObjectMeta, gofuzzheaders.Continue) {
return func(objectMeta *metav1.ObjectMeta, c gofuzzheaders.Continue) {
objectMeta.Name = generateValidK8sName(c, "resource")
objectMeta.Namespace = namespace
// Generate valid labels
objectMeta.Labels = map[string]string{
"app.kubernetes.io/name": generateValidK8sLabel(c, "kubevirt"),
"app.kubernetes.io/component": generateValidK8sLabel(c, "virt"),
"kubevirt.io/test": "fuzzer",
}
// Generate valid annotations
objectMeta.Annotations = map[string]string{
"kubevirt.io/latest-observed-api-version": virtv1.ApiLatestVersion,
"description": generateValidK8sLabel(c, "test"),
}
// Set generation and resource version
objectMeta.Generation = c.Int63()
objectMeta.ResourceVersion = fmt.Sprintf("%d", c.Int63())
}
}
// CustomPodFuzzer creates a custom fuzzer for Pods with valid containers and specs
func CustomPodFuzzer() func(*k8sv1.Pod, gofuzzheaders.Continue) {
return func(pod *k8sv1.Pod, c gofuzzheaders.Continue) {
// Let ObjectMeta fuzzer handle metadata
// Just ensure containers are valid
if len(pod.Spec.Containers) == 0 {
pod.Spec.Containers = []k8sv1.Container{
{
Name: "compute",
Image: "kubevirt/virt-launcher:latest",
Resources: k8sv1.ResourceRequirements{
Requests: k8sv1.ResourceList{
k8sv1.ResourceMemory: resource.MustParse("64Mi"),
k8sv1.ResourceCPU: resource.MustParse("100m"),
},
},
},
}
} else {
// Fix existing containers
for i := range pod.Spec.Containers {
if pod.Spec.Containers[i].Name == "" {
pod.Spec.Containers[i].Name = fmt.Sprintf("container-%d", i)
}
if pod.Spec.Containers[i].Image == "" {
pod.Spec.Containers[i].Image = "kubevirt/virt-launcher:latest"
}
}
}
// Ensure valid restart policy
if pod.Spec.RestartPolicy == "" {
restartPolicies := []k8sv1.RestartPolicy{
k8sv1.RestartPolicyAlways,
k8sv1.RestartPolicyOnFailure,
k8sv1.RestartPolicyNever,
}
pod.Spec.RestartPolicy = restartPolicies[c.Intn(len(restartPolicies))]
}
}
}
// CustomPVCFuzzer creates a custom fuzzer for PersistentVolumeClaims with valid specs
func CustomPVCFuzzer() func(*k8sv1.PersistentVolumeClaim, gofuzzheaders.Continue) {
return func(pvc *k8sv1.PersistentVolumeClaim, c gofuzzheaders.Continue) {
// Ensure valid storage request
if pvc.Spec.Resources.Requests == nil {
pvc.Spec.Resources.Requests = k8sv1.ResourceList{
k8sv1.ResourceStorage: resource.MustParse("1Gi"),
}
}
// Ensure valid access modes
if len(pvc.Spec.AccessModes) == 0 {
accessModes := []k8sv1.PersistentVolumeAccessMode{
k8sv1.ReadWriteOnce,
k8sv1.ReadOnlyMany,
k8sv1.ReadWriteMany,
}
pvc.Spec.AccessModes = []k8sv1.PersistentVolumeAccessMode{
accessModes[c.Intn(len(accessModes))],
}
}
}
}
// CustomDataVolumeFuzzer creates a custom fuzzer for DataVolumes with valid specs
func CustomDataVolumeFuzzer() func(*cdiv1.DataVolume, gofuzzheaders.Continue) {
return func(dv *cdiv1.DataVolume, c gofuzzheaders.Continue) {
// Ensure valid PVC spec
if dv.Spec.PVC == nil {
storageRequest := resource.MustParse("1Gi")
dv.Spec.PVC = &k8sv1.PersistentVolumeClaimSpec{
AccessModes: []k8sv1.PersistentVolumeAccessMode{k8sv1.ReadWriteOnce},
Resources: k8sv1.VolumeResourceRequirements{
Requests: k8sv1.ResourceList{
k8sv1.ResourceStorage: storageRequest,
},
},
}
}
// Ensure valid source
if dv.Spec.Source == nil {
registryURL := "docker://kubevirt/fedora-cloud-container-disk-demo"
sources := []cdiv1.DataVolumeSource{
{HTTP: &cdiv1.DataVolumeSourceHTTP{URL: "http://example.com/image.img"}},
{Registry: &cdiv1.DataVolumeSourceRegistry{URL: &registryURL}},
{PVC: &cdiv1.DataVolumeSourcePVC{Namespace: "default", Name: "source-pvc"}},
}
dv.Spec.Source = &sources[c.Intn(len(sources))]
}
}
}
// CustomVMIFuzzer creates a custom fuzzer for VirtualMachineInstance with valid specs
func CustomVMIFuzzer(namespace string) func(*virtv1.VirtualMachineInstance, gofuzzheaders.Continue) {
return func(vmi *virtv1.VirtualMachineInstance, c gofuzzheaders.Continue) {
// Ensure valid domain spec
if vmi.Spec.Domain.Resources.Requests == nil {
vmi.Spec.Domain.Resources.Requests = k8sv1.ResourceList{
k8sv1.ResourceMemory: resource.MustParse("64Mi"),
}
}
// Ensure at least one disk and volume
if len(vmi.Spec.Domain.Devices.Disks) == 0 {
vmi.Spec.Domain.Devices.Disks = []virtv1.Disk{
{
Name: "disk0",
DiskDevice: virtv1.DiskDevice{
Disk: &virtv1.DiskTarget{
Bus: virtv1.DiskBusVirtio,
},
},
},
}
}
if len(vmi.Spec.Volumes) == 0 {
vmi.Spec.Volumes = []virtv1.Volume{
{
Name: "disk0",
VolumeSource: virtv1.VolumeSource{
ContainerDisk: &virtv1.ContainerDiskSource{
Image: "kubevirt/cirros-container-disk-demo",
},
},
},
}
}
// Ensure network is set
if len(vmi.Spec.Networks) == 0 {
vmi.Spec.Networks = []virtv1.Network{
{
Name: "default",
NetworkSource: virtv1.NetworkSource{
Pod: &virtv1.PodNetwork{},
},
},
}
}
if len(vmi.Spec.Domain.Devices.Interfaces) == 0 {
vmi.Spec.Domain.Devices.Interfaces = []virtv1.Interface{
{
Name: "default",
InterfaceBindingMethod: virtv1.InterfaceBindingMethod{
Masquerade: &virtv1.InterfaceMasquerade{},
},
},
}
}
}
}
// CustomVMFuzzer creates a custom fuzzer for VirtualMachine with valid specs
func CustomVMFuzzer(namespace string) func(*virtv1.VirtualMachine, gofuzzheaders.Continue) {
return func(vm *virtv1.VirtualMachine, c gofuzzheaders.Continue) {
// Ensure template is set
if vm.Spec.Template == nil {
vm.Spec.Template = &virtv1.VirtualMachineInstanceTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"kubevirt.io/vm": vm.Name,
},
},
Spec: virtv1.VirtualMachineInstanceSpec{
Domain: virtv1.DomainSpec{
Resources: virtv1.ResourceRequirements{
Requests: k8sv1.ResourceList{
k8sv1.ResourceMemory: resource.MustParse("64Mi"),
},
},
Devices: virtv1.Devices{
Disks: []virtv1.Disk{
{
Name: "disk0",
DiskDevice: virtv1.DiskDevice{
Disk: &virtv1.DiskTarget{
Bus: virtv1.DiskBusVirtio,
},
},
},
},
},
},
Volumes: []virtv1.Volume{
{
Name: "disk0",
VolumeSource: virtv1.VolumeSource{
ContainerDisk: &virtv1.ContainerDiskSource{
Image: "kubevirt/cirros-container-disk-demo",
},
},
},
},
},
}
}
// Apply the VMI fuzzer to the template spec and copy the result back:
// the struct literal copies the spec by value, so mutations would otherwise be lost
templateVMI := &virtv1.VirtualMachineInstance{
Spec: vm.Spec.Template.Spec,
}
CustomVMIFuzzer(namespace)(templateVMI, c)
vm.Spec.Template.Spec = templateVMI.Spec
// Set running state randomly
running := c.RandBool()
vm.Spec.Running = &running
}
}
// CustomNodeFuzzer creates a custom fuzzer for Nodes with valid names and labels
func CustomNodeFuzzer() func(*k8sv1.Node, gofuzzheaders.Continue) {
return func(node *k8sv1.Node, c gofuzzheaders.Continue) {
// Ensure valid node name
if node.Name == "" {
node.Name = generateValidK8sName(c, "node")
}
// Ensure standard node labels
if node.Labels == nil {
node.Labels = make(map[string]string)
}
node.Labels["kubernetes.io/hostname"] = node.Name
node.Labels["kubernetes.io/os"] = "linux"
// Add capacity and allocatable if missing
if node.Status.Capacity == nil {
node.Status.Capacity = k8sv1.ResourceList{
k8sv1.ResourceCPU: resource.MustParse("4"),
k8sv1.ResourceMemory: resource.MustParse("8Gi"),
k8sv1.ResourcePods: resource.MustParse("110"),
}
}
if node.Status.Allocatable == nil {
node.Status.Allocatable = k8sv1.ResourceList{
k8sv1.ResourceCPU: resource.MustParse("4"),
k8sv1.ResourceMemory: resource.MustParse("8Gi"),
k8sv1.ResourcePods: resource.MustParse("110"),
}
}
// Set node to ready
node.Status.Conditions = []k8sv1.NodeCondition{
{
Type: k8sv1.NodeReady,
Status: k8sv1.ConditionTrue,
},
}
}
}
// CustomVMIMigrationFuzzer creates a custom fuzzer for VirtualMachineInstanceMigration
func CustomVMIMigrationFuzzer(namespace string) func(*virtv1.VirtualMachineInstanceMigration, gofuzzheaders.Continue) {
return func(migration *virtv1.VirtualMachineInstanceMigration, c gofuzzheaders.Continue) {
// Ensure valid VMI name reference
if migration.Spec.VMIName == "" {
migration.Spec.VMIName = generateValidK8sName(c, "vmi")
}
}
}
// CustomPodDisruptionBudgetFuzzer creates a custom fuzzer for PodDisruptionBudget
func CustomPodDisruptionBudgetFuzzer() func(*policyv1.PodDisruptionBudget, gofuzzheaders.Continue) {
return func(pdb *policyv1.PodDisruptionBudget, c gofuzzheaders.Continue) {
// Ensure valid selector
if pdb.Spec.Selector == nil {
pdb.Spec.Selector = &metav1.LabelSelector{
MatchLabels: map[string]string{
"kubevirt.io/domain": generateValidK8sLabel(c, "vmi"),
},
}
}
// Ensure valid min available
if pdb.Spec.MinAvailable == nil && pdb.Spec.MaxUnavailable == nil {
minAvailable := intstr.FromInt(1)
pdb.Spec.MinAvailable = &minAvailable
}
}
}
// CustomNodeWithTaintsFuzzer creates a custom fuzzer for Nodes with evacuation taints
func CustomNodeWithTaintsFuzzer() func(*k8sv1.Node, gofuzzheaders.Continue) {
return func(node *k8sv1.Node, c gofuzzheaders.Continue) {
// First apply standard node fuzzer
CustomNodeFuzzer()(node, c)
// Add evacuation taints randomly
if c.RandBool() {
taintEffects := []k8sv1.TaintEffect{
k8sv1.TaintEffectNoSchedule,
k8sv1.TaintEffectNoExecute,
}
node.Spec.Taints = []k8sv1.Taint{
{
Key: "kubevirt.io/drain",
Value: "draining",
Effect: taintEffects[c.Intn(len(taintEffects))],
},
}
}
}
}
// CustomVMPoolFuzzer creates a custom fuzzer for VirtualMachinePool
func CustomVMPoolFuzzer(namespace string) func(*poolv1.VirtualMachinePool, gofuzzheaders.Continue) {
return func(pool *poolv1.VirtualMachinePool, c gofuzzheaders.Continue) {
// Set the namespace directly; the name is validated below
pool.Namespace = namespace
// Ensure valid name
if pool.Name == "" {
pool.Name = generateValidK8sName(c, "vmpool")
}
// Set basic ObjectMeta fields
if pool.UID == "" {
pool.UID = "test-uid"
}
if pool.ResourceVersion == "" {
pool.ResourceVersion = "1"
}
// Ensure valid replicas
if pool.Spec.Replicas == nil {
replicas := int32(1 + c.Intn(3))
pool.Spec.Replicas = &replicas
}
// Ensure valid selector
if pool.Spec.Selector == nil {
pool.Spec.Selector = &metav1.LabelSelector{
MatchLabels: map[string]string{
"kubevirt.io/vmpool": pool.Name,
},
}
}
// Ensure valid VM template
if pool.Spec.VirtualMachineTemplate == nil {
running := false
pool.Spec.VirtualMachineTemplate = &poolv1.VirtualMachineTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"kubevirt.io/vmpool": pool.Name,
},
},
Spec: virtv1.VirtualMachineSpec{
Running: &running,
Template: &virtv1.VirtualMachineInstanceTemplateSpec{
Spec: virtv1.VirtualMachineInstanceSpec{
Domain: virtv1.DomainSpec{
Resources: virtv1.ResourceRequirements{
Requests: k8sv1.ResourceList{
k8sv1.ResourceMemory: resource.MustParse("64Mi"),
},
},
},
},
},
},
}
}
}
}
// CustomVMCloneFuzzer creates a custom fuzzer for VirtualMachineClone
func CustomVMCloneFuzzer(namespace string) func(*clonev1.VirtualMachineClone, gofuzzheaders.Continue) {
return func(vmClone *clonev1.VirtualMachineClone, c gofuzzheaders.Continue) {
// Ensure valid source reference
if vmClone.Spec.Source == nil {
vmClone.Spec.Source = &k8sv1.TypedLocalObjectReference{
APIGroup: &virtv1.SchemeGroupVersion.Group,
Kind: "VirtualMachine",
Name: generateValidK8sName(c, "source-vm"),
}
}
// Ensure valid target name
if vmClone.Spec.Target == nil {
vmClone.Spec.Target = &k8sv1.TypedLocalObjectReference{
APIGroup: &virtv1.SchemeGroupVersion.Group,
Kind: "VirtualMachine",
Name: generateValidK8sName(c, "target-vm"),
}
}
}
}
// CustomVMIReplicaSetFuzzer creates a custom fuzzer for VirtualMachineInstanceReplicaSet
func CustomVMIReplicaSetFuzzer(namespace string) func(*virtv1.VirtualMachineInstanceReplicaSet, gofuzzheaders.Continue) {
return func(rs *virtv1.VirtualMachineInstanceReplicaSet, c gofuzzheaders.Continue) {
// Ensure metadata is valid
rs.Namespace = namespace
rs.Name = generateValidK8sName(c, "rs")
if rs.Labels == nil {
rs.Labels = make(map[string]string)
}
rs.Labels["app"] = generateValidK8sLabel(c, "app")
// Ensure replicas is reasonable
if rs.Spec.Replicas == nil {
replicas := int32(c.Intn(10))
rs.Spec.Replicas = &replicas
}
// Ensure template is not nil
if rs.Spec.Template == nil {
rs.Spec.Template = &virtv1.VirtualMachineInstanceTemplateSpec{}
}
// Ensure template has valid labels
if rs.Spec.Template.ObjectMeta.Labels == nil {
rs.Spec.Template.ObjectMeta.Labels = make(map[string]string)
}
rs.Spec.Template.ObjectMeta.Labels["app"] = generateValidK8sLabel(c, "app")
// Ensure selector matches template labels
if rs.Spec.Selector == nil {
rs.Spec.Selector = &metav1.LabelSelector{}
}
rs.Spec.Selector.MatchLabels = rs.Spec.Template.ObjectMeta.Labels
// Ensure template spec is valid
if rs.Spec.Template.Spec.Domain.Devices.Disks == nil {
rs.Spec.Template.Spec.Domain.Devices.Disks = []virtv1.Disk{
{
Name: generateValidK8sName(c, "disk"),
DiskDevice: virtv1.DiskDevice{
Disk: &virtv1.DiskTarget{
Bus: "virtio",
},
},
},
}
}
if rs.Spec.Template.Spec.Volumes == nil {
rs.Spec.Template.Spec.Volumes = []virtv1.Volume{
{
Name: rs.Spec.Template.Spec.Domain.Devices.Disks[0].Name,
VolumeSource: virtv1.VolumeSource{
ContainerDisk: &virtv1.ContainerDiskSource{
Image: "kubevirt/fedora-cloud-container-disk-demo",
},
},
},
}
}
}
}
// CustomVMSnapshotFuzzer creates a custom fuzzer for VirtualMachineSnapshot
func CustomVMSnapshotFuzzer(namespace string) func(*snapshotv1.VirtualMachineSnapshot, gofuzzheaders.Continue) {
return func(vmSnapshot *snapshotv1.VirtualMachineSnapshot, c gofuzzheaders.Continue) {
// Ensure valid metadata
vmSnapshot.Namespace = namespace
vmSnapshot.Name = generateValidK8sName(c, "snapshot")
if vmSnapshot.Labels == nil {
vmSnapshot.Labels = make(map[string]string)
}
// Ensure valid source reference
if vmSnapshot.Spec.Source.Name == "" {
vmSnapshot.Spec.Source.Name = generateValidK8sName(c, "vm")
}
if vmSnapshot.Spec.Source.Kind == "" {
vmSnapshot.Spec.Source.Kind = "VirtualMachine"
}
if vmSnapshot.Spec.Source.APIGroup == nil {
apiGroup := virtv1.SchemeGroupVersion.Group
vmSnapshot.Spec.Source.APIGroup = &apiGroup
}
// Set deletion policy if nil
if vmSnapshot.Spec.DeletionPolicy == nil {
deletionPolicy := snapshotv1.VirtualMachineSnapshotContentDelete
vmSnapshot.Spec.DeletionPolicy = &deletionPolicy
}
}
}
// CustomVMSnapshotContentFuzzer creates a custom fuzzer for VirtualMachineSnapshotContent
func CustomVMSnapshotContentFuzzer(namespace string) func(*snapshotv1.VirtualMachineSnapshotContent, gofuzzheaders.Continue) {
return func(content *snapshotv1.VirtualMachineSnapshotContent, c gofuzzheaders.Continue) {
// Ensure valid metadata
content.Namespace = namespace
content.Name = generateValidK8sName(c, "snapshot-content")
if content.Labels == nil {
content.Labels = make(map[string]string)
}
// Ensure valid source reference
if content.Spec.Source.VirtualMachine != nil && content.Spec.Source.VirtualMachine.Name == "" {
content.Spec.Source.VirtualMachine.Name = generateValidK8sName(c, "vm")
}
// Ensure valid snapshot reference
if content.Spec.VirtualMachineSnapshotName == nil {
snapshotName := generateValidK8sName(c, "snapshot")
content.Spec.VirtualMachineSnapshotName = &snapshotName
}
}
}
// GetCustomFuzzFuncs returns a slice of custom fuzz functions for the given namespace
func GetCustomFuzzFuncs(namespace string) []interface{} {
return []interface{}{
CustomObjectMetaFuzzer(namespace),
CustomPodFuzzer(),
CustomPVCFuzzer(),
CustomDataVolumeFuzzer(),
CustomVMIFuzzer(namespace),
CustomVMFuzzer(namespace),
CustomNodeFuzzer(),
}
}
package topology
import (
"math"
"k8s.io/client-go/tools/cache"
v1 "k8s.io/api/core/v1"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
)
const TSCFrequencyLabel = virtv1.CPUTimerLabel + "tsc-frequency"
const TSCFrequencySchedulingLabel = "scheduling.node.kubevirt.io/tsc-frequency"
const TSCScalableLabel = virtv1.CPUTimerLabel + "tsc-scalable"
const TSCTolerancePPM float64 = 250
type FilterPredicateFunc func(node *v1.Node) bool
func IsSchedulable(node *v1.Node) bool {
if node == nil {
return false
}
return node.Labels[virtv1.NodeSchedulable] == "true"
}
func HasInvTSCFrequency(node *v1.Node) bool {
if node == nil {
return false
}
freq, _, err := TSCFrequencyFromNode(node)
if err != nil {
log.DefaultLogger().Reason(err).Errorf("Excluding node %s with invalid tsc-frequency", node.Name)
return false
} else if freq == 0 {
return false
}
return true
}
func TSCFrequencyGreaterEqual(frequency int64) FilterPredicateFunc {
return func(node *v1.Node) bool {
if node == nil {
return false
}
freq, scalable, err := TSCFrequencyFromNode(node)
if err != nil {
log.DefaultLogger().Reason(err).Errorf("Excluding node %s with invalid tsc-frequency", node.Name)
return false
} else if freq == 0 {
return false
}
return (scalable && freq >= frequency) || (freq == frequency && !scalable)
}
}
func NodeOfVMI(vmi *virtv1.VirtualMachineInstance) FilterPredicateFunc {
return func(node *v1.Node) bool {
if vmi.Status.NodeName == "" {
return false
}
if node == nil {
return false
}
if node.Name == vmi.Status.NodeName {
return true
}
return false
}
}
func Not(f FilterPredicateFunc) FilterPredicateFunc {
return func(node *v1.Node) bool {
return !f(node)
}
}
func Or(predicates ...FilterPredicateFunc) FilterPredicateFunc {
return func(node *v1.Node) bool {
for _, p := range predicates {
if p(node) {
return true
}
}
return false
}
}
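// Example composition (illustrative; vmi, vmiStore and nodeStore come from the
// caller): keep nodes with a valid invtsc frequency that are schedulable or
// still run VMIs, excluding the node the VMI currently occupies.
//
//	predicates := []FilterPredicateFunc{
//		HasInvTSCFrequency,
//		Or(IsSchedulable, IsNodeRunningVmis(vmiStore)),
//		Not(NodeOfVMI(vmi)),
//	}
//	nodes := FilterNodesFromCache(nodeStore.List(), predicates...)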
func FilterNodesFromCache(objs []interface{}, predicates ...FilterPredicateFunc) []*v1.Node {
match := []*v1.Node{}
for _, obj := range objs {
node := obj.(*v1.Node)
passes := true
for _, p := range predicates {
if !p(node) {
passes = false
break
}
}
if passes {
match = append(match, node)
}
}
return match
}
func IsNodeRunningVmis(vmiStore cache.Store) FilterPredicateFunc {
return func(node *v1.Node) bool {
if node == nil {
return false
}
for _, vmi := range vmiStore.List() {
vmi := vmi.(*virtv1.VirtualMachineInstance)
if vmi.Status.NodeName == node.Name {
return true
}
}
return false
}
}
// ToleranceForFrequency returns TSCTolerancePPM parts per million of freq, rounded down to the nearest Hz
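// For example, with freq = 2500000000 Hz (2.5 GHz) the result is
// floor(2500000000 * 250 / 1000000) = 625000 Hz, i.e. ±625 kHz.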
func ToleranceForFrequency(freq int64) int64 {
return int64(math.Floor(float64(freq) * (TSCTolerancePPM / 1000000)))
}
// Code generated by MockGen. DO NOT EDIT.
// Source: hinter.go
//
// Generated by this command:
//
// mockgen -source hinter.go -package=topology -destination=generated_mock_hinter.go
//
// Package topology is a generated GoMock package.
package topology
import (
reflect "reflect"
gomock "go.uber.org/mock/gomock"
v1 "kubevirt.io/api/core/v1"
)
// MockHinter is a mock of Hinter interface.
type MockHinter struct {
ctrl *gomock.Controller
recorder *MockHinterMockRecorder
isgomock struct{}
}
// MockHinterMockRecorder is the mock recorder for MockHinter.
type MockHinterMockRecorder struct {
mock *MockHinter
}
// NewMockHinter creates a new mock instance.
func NewMockHinter(ctrl *gomock.Controller) *MockHinter {
mock := &MockHinter{ctrl: ctrl}
mock.recorder = &MockHinterMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockHinter) EXPECT() *MockHinterMockRecorder {
return m.recorder
}
// IsTscFrequencyRequired mocks base method.
func (m *MockHinter) IsTscFrequencyRequired(vmi *v1.VirtualMachineInstance) bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IsTscFrequencyRequired", vmi)
ret0, _ := ret[0].(bool)
return ret0
}
// IsTscFrequencyRequired indicates an expected call of IsTscFrequencyRequired.
func (mr *MockHinterMockRecorder) IsTscFrequencyRequired(vmi any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsTscFrequencyRequired", reflect.TypeOf((*MockHinter)(nil).IsTscFrequencyRequired), vmi)
}
// LowestTSCFrequencyOnCluster mocks base method.
func (m *MockHinter) LowestTSCFrequencyOnCluster() (int64, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LowestTSCFrequencyOnCluster")
ret0, _ := ret[0].(int64)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// LowestTSCFrequencyOnCluster indicates an expected call of LowestTSCFrequencyOnCluster.
func (mr *MockHinterMockRecorder) LowestTSCFrequencyOnCluster() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LowestTSCFrequencyOnCluster", reflect.TypeOf((*MockHinter)(nil).LowestTSCFrequencyOnCluster))
}
// TSCFrequenciesInUse mocks base method.
func (m *MockHinter) TSCFrequenciesInUse() []int64 {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "TSCFrequenciesInUse")
ret0, _ := ret[0].([]int64)
return ret0
}
// TSCFrequenciesInUse indicates an expected call of TSCFrequenciesInUse.
func (mr *MockHinterMockRecorder) TSCFrequenciesInUse() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TSCFrequenciesInUse", reflect.TypeOf((*MockHinter)(nil).TSCFrequenciesInUse))
}
// TopologyHintsForVMI mocks base method.
func (m *MockHinter) TopologyHintsForVMI(vmi *v1.VirtualMachineInstance) (*v1.TopologyHints, TscFrequencyRequirementType, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "TopologyHintsForVMI", vmi)
ret0, _ := ret[0].(*v1.TopologyHints)
ret1, _ := ret[1].(TscFrequencyRequirementType)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// TopologyHintsForVMI indicates an expected call of TopologyHintsForVMI.
func (mr *MockHinterMockRecorder) TopologyHintsForVMI(vmi any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TopologyHintsForVMI", reflect.TypeOf((*MockHinter)(nil).TopologyHintsForVMI), vmi)
}
// Code generated by MockGen. DO NOT EDIT.
// Source: nodetopologyupdater.go
//
// Generated by this command:
//
// mockgen -source nodetopologyupdater.go -package=topology -destination=generated_mock_nodetopologyupdater.go
//
// Package topology is a generated GoMock package.
package topology
import (
reflect "reflect"
time "time"
gomock "go.uber.org/mock/gomock"
)
// MockNodeTopologyUpdater is a mock of NodeTopologyUpdater interface.
type MockNodeTopologyUpdater struct {
ctrl *gomock.Controller
recorder *MockNodeTopologyUpdaterMockRecorder
isgomock struct{}
}
// MockNodeTopologyUpdaterMockRecorder is the mock recorder for MockNodeTopologyUpdater.
type MockNodeTopologyUpdaterMockRecorder struct {
mock *MockNodeTopologyUpdater
}
// NewMockNodeTopologyUpdater creates a new mock instance.
func NewMockNodeTopologyUpdater(ctrl *gomock.Controller) *MockNodeTopologyUpdater {
mock := &MockNodeTopologyUpdater{ctrl: ctrl}
mock.recorder = &MockNodeTopologyUpdaterMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockNodeTopologyUpdater) EXPECT() *MockNodeTopologyUpdaterMockRecorder {
return m.recorder
}
// Run mocks base method.
func (m *MockNodeTopologyUpdater) Run(interval time.Duration, stopChan <-chan struct{}) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "Run", interval, stopChan)
}
// Run indicates an expected call of Run.
func (mr *MockNodeTopologyUpdaterMockRecorder) Run(interval, stopChan any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockNodeTopologyUpdater)(nil).Run), interval, stopChan)
}
package topology
//go:generate mockgen -source $GOFILE -package=$GOPACKAGE -destination=generated_mock_$GOFILE
import (
"fmt"
"k8s.io/client-go/tools/cache"
"kubevirt.io/kubevirt/pkg/pointer"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
k6tv1 "kubevirt.io/api/core/v1"
)
type Hinter interface {
TopologyHintsForVMI(vmi *k6tv1.VirtualMachineInstance) (hints *k6tv1.TopologyHints, requirement TscFrequencyRequirementType, err error)
IsTscFrequencyRequired(vmi *k6tv1.VirtualMachineInstance) bool
TSCFrequenciesInUse() []int64
LowestTSCFrequencyOnCluster() (int64, error)
}
type topologyHinter struct {
clusterConfig *virtconfig.ClusterConfig
nodeStore cache.Store
vmiStore cache.Store
}
func (t *topologyHinter) IsTscFrequencyRequired(vmi *k6tv1.VirtualMachineInstance) bool {
return vmi.Spec.Architecture == "amd64" && GetTscFrequencyRequirement(vmi).Type != NotRequired
}
func (t *topologyHinter) TopologyHintsForVMI(vmi *k6tv1.VirtualMachineInstance) (hints *k6tv1.TopologyHints, requirement TscFrequencyRequirementType, err error) {
requirement = GetTscFrequencyRequirement(vmi).Type
if requirement == NotRequired || vmi.Spec.Architecture != "amd64" {
return
}
freq, err := t.LowestTSCFrequencyOnCluster()
if err != nil {
return nil, requirement, fmt.Errorf("failed to determine the lowest tsc frequency on the cluster: %v", err)
}
hints = &k6tv1.TopologyHints{TSCFrequency: pointer.P(freq)}
return
}
func (t *topologyHinter) LowestTSCFrequencyOnCluster() (int64, error) {
configTSCFrequency := t.clusterConfig.GetMinimumClusterTSCFrequency()
if configTSCFrequency != nil {
if *configTSCFrequency > 0 {
return *configTSCFrequency, nil
} else {
return 0, fmt.Errorf("the configured minimumClusterTSCFrequency must be greater 0, but got %d", *configTSCFrequency)
}
}
nodes := FilterNodesFromCache(t.nodeStore.List(),
HasInvTSCFrequency,
Or(
IsSchedulable,
IsNodeRunningVmis(t.vmiStore),
),
)
freq := LowestTSCFrequency(nodes)
return freq, nil
}
func (t *topologyHinter) TSCFrequenciesInUse() []int64 {
frequencyMap := map[int64]struct{}{}
for _, obj := range t.vmiStore.List() {
vmi := obj.(*k6tv1.VirtualMachineInstance)
if AreTSCFrequencyTopologyHintsDefined(vmi) {
frequencyMap[*vmi.Status.TopologyHints.TSCFrequency] = struct{}{}
}
}
frequencies := []int64{}
for freq := range frequencyMap {
frequencies = append(frequencies, freq)
}
return frequencies
}
func NewTopologyHinter(nodeStore cache.Store, vmiStore cache.Store, clusterConfig *virtconfig.ClusterConfig) *topologyHinter {
return &topologyHinter{nodeStore: nodeStore, vmiStore: vmiStore, clusterConfig: clusterConfig}
}
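// Typical wiring (illustrative; the informers and config come from the caller):
//
//	hinter := NewTopologyHinter(nodeInformer.GetStore(), vmiInformer.GetStore(), clusterConfig)
//	hints, requirement, err := hinter.TopologyHintsForVMI(vmi)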
package topology
//go:generate mockgen -source $GOFILE -package=$GOPACKAGE -destination=generated_mock_$GOFILE
import (
"fmt"
"time"
nodeutils "kubevirt.io/kubevirt/pkg/util/nodes"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
)
type NodeTopologyUpdater interface {
Run(interval time.Duration, stopChan <-chan struct{})
}
type nodeTopologyUpdater struct {
nodeInformer cache.SharedIndexInformer
hinter Hinter
client kubecli.KubevirtClient
}
type updateStats struct {
updated int
skipped int
error int
}
func (n *nodeTopologyUpdater) Run(interval time.Duration, stopChan <-chan struct{}) {
cache.WaitForCacheSync(stopChan, n.nodeInformer.HasSynced)
wait.JitterUntil(func() {
nodes := FilterNodesFromCache(n.nodeInformer.GetStore().List(),
HasInvTSCFrequency,
)
stats := n.sync(nodes)
if stats.updated != 0 || stats.error != 0 {
log.DefaultLogger().Infof("TSC Frequency node update status: %d updated, %d skipped, %d errors", stats.updated, stats.skipped, stats.error)
}
}, interval, 1.2, true, stopChan)
}
func (n *nodeTopologyUpdater) sync(nodes []*v1.Node) *updateStats {
requiredFrequencies, err := n.requiredFrequencies()
if err != nil {
log.DefaultLogger().Reason(err).Error("Skipping TSC frequency updates on all nodes")
return &updateStats{skipped: len(nodes)}
}
stats := &updateStats{}
for _, node := range nodes {
nodeCopy, err := calculateNodeLabelChanges(node, requiredFrequencies)
if err != nil {
stats.error++
log.DefaultLogger().Object(node).Reason(err).Error("Could not calculate TSC frequencies for node")
continue
}
if !equality.Semantic.DeepEqual(node.Labels, nodeCopy.Labels) {
if err := nodeutils.PatchNode(n.client, node, nodeCopy); err != nil {
stats.error++
log.DefaultLogger().Object(node).Reason(err).Error("Could not patch TSC frequencies for node")
continue
}
stats.updated++
} else {
stats.skipped++
}
}
return stats
}
func calculateNodeLabelChanges(original *v1.Node, requiredFrequencies []int64) (modified *v1.Node, err error) {
nodeFreq, scalable, err := TSCFrequencyFromNode(original)
if err != nil {
log.DefaultLogger().Reason(err).Object(original).Errorf("Can't determine original TSC frequency of node %s", original.Name)
return nil, err
}
freqsOnNode := TSCFrequenciesOnNode(original)
toAdd, toRemove := CalculateTSCLabelDiff(requiredFrequencies, freqsOnNode, nodeFreq, scalable)
toAddLabels := ToTSCSchedulableLabels(toAdd)
toRemoveLabels := ToTSCSchedulableLabels(toRemove)
nodeCopy := original.DeepCopy()
for _, freq := range toAddLabels {
nodeCopy.Labels[freq] = "true"
}
for _, freq := range toRemoveLabels {
delete(nodeCopy.Labels, freq)
}
return nodeCopy, nil
}
func (n *nodeTopologyUpdater) requiredFrequencies() ([]int64, error) {
lowestFrequency, err := n.hinter.LowestTSCFrequencyOnCluster()
if err != nil {
return nil, fmt.Errorf("failed to calculate lowest TSC frequency for nodes: %v", err)
}
return append(n.hinter.TSCFrequenciesInUse(), lowestFrequency), nil
}
func NewNodeTopologyUpdater(clientset kubecli.KubevirtClient, hinter Hinter, nodeInformer cache.SharedIndexInformer) NodeTopologyUpdater {
return &nodeTopologyUpdater{
client: clientset,
hinter: hinter,
nodeInformer: nodeInformer,
}
}
package topology
import (
"fmt"
"strconv"
"strings"
v1 "k8s.io/api/core/v1"
k6tv1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
)
type TscFrequencyRequirementType int
const (
RequiredForBoot TscFrequencyRequirementType = iota
RequiredForMigration
NotRequired
)
type TscFrequencyRequirement struct {
Type TscFrequencyRequirementType
Reason string
}
func LowestTSCFrequency(nodes []*v1.Node) int64 {
var lowest int64
for _, node := range nodes {
freq, _, err := TSCFrequencyFromNode(node)
if err != nil {
log.DefaultLogger().Reason(err).Errorf("Excluding node %s with invalid tsc-frequency", node.Name)
}
// Comparing against lowest == 0 rather than the loop index ensures a valid
// frequency is still recorded when the first node was excluded
if freq > 0 && (lowest == 0 || freq < lowest) {
lowest = freq
}
}
return lowest
}
func TSCFrequencyFromNode(node *v1.Node) (frequency int64, scalable bool, err error) {
if val, exists := node.Labels[TSCScalableLabel]; exists {
scalable = val == "true"
}
if val, exists := node.Labels[TSCFrequencyLabel]; exists {
freq, err := strconv.ParseInt(val, 10, 64)
if err != nil {
return 0, false, fmt.Errorf("tsc frequency on node %v is not an int: %v", node.Name, err)
} else if freq <= 0 {
return 0, false, fmt.Errorf("tsc frequency on node %v is invalid: expected a frequenchy bigger than 0, but got %v", node.Name, freq)
}
return freq, scalable, err
}
return 0, false, nil
}
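// Example node labels consumed by TSCFrequencyFromNode, assuming
// virtv1.CPUTimerLabel resolves to the "cpu-timer.node.kubevirt.io/" prefix
// (frequency is in Hz):
//
//	cpu-timer.node.kubevirt.io/tsc-frequency: "2500000000"
//	cpu-timer.node.kubevirt.io/tsc-scalable:  "true"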
func TSCFrequenciesOnNode(node *v1.Node) (frequencies []int64) {
for key := range node.Labels {
if strings.HasPrefix(key, TSCFrequencySchedulingLabel+"-") {
freq, err := strconv.ParseInt(strings.TrimPrefix(key, TSCFrequencySchedulingLabel+"-"), 10, 64)
if err != nil {
log.DefaultLogger().Object(node).Reason(err).Errorf("Label %s is invalid", key)
continue
}
frequencies = append(frequencies, freq)
}
}
return
}
func distance(freq1, freq2 int64) int64 {
if freq1 > freq2 {
return freq1 - freq2
}
return freq2 - freq1
}
func CalculateTSCLabelDiff(frequenciesInUse []int64, frequenciesOnNode []int64, nodeFrequency int64, scalable bool) (toAdd []int64, toRemove []int64) {
frequenciesInUse = append(frequenciesInUse, nodeFrequency)
tolerance := ToleranceForFrequency(nodeFrequency)
requiredMap := map[int64]struct{}{}
for _, freq := range frequenciesInUse {
if !scalable && distance(freq, nodeFrequency) > tolerance {
// A non-scalable node can only accept frequencies that are within Qemu's tolerance:
// nodeFrequency*(1-0.000250) < acceptableFrequency < nodeFrequency*(1+0.000250).
// Skip the frequencies that are outside that range
continue
}
requiredMap[freq] = struct{}{}
}
for _, freq := range frequenciesOnNode {
if _, exists := requiredMap[freq]; !exists {
toRemove = append(toRemove, freq)
}
}
for freq := range requiredMap {
// For the non-scalable case, the map was already sanitized above.
// For the scalable case, a node can accept frequencies that are either lower than its own or within the tolerance range
if !scalable || freq <= nodeFrequency || distance(freq, nodeFrequency) <= tolerance {
toAdd = append(toAdd, freq)
}
}
return
}
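// A worked example (illustrative numbers): for a non-scalable node at
// 2500000000 Hz the tolerance is 625000 Hz, so a required frequency of
// 2500500000 Hz is within tolerance and added, while 2600000000 Hz is
// skipped; a scalable node additionally accepts any frequency at or below
// its own 2500000000 Hz.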
func ToTSCSchedulableLabels(frequencies []int64) (labels []string) {
for _, freq := range frequencies {
labels = append(labels, ToTSCSchedulableLabel(freq))
}
return
}
func ToTSCSchedulableLabel(frequency int64) string {
return fmt.Sprintf("%s-%d", TSCFrequencySchedulingLabel, frequency)
}
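// e.g. ToTSCSchedulableLabel(2500000000) returns
// "scheduling.node.kubevirt.io/tsc-frequency-2500000000".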
func AreTSCFrequencyTopologyHintsDefined(vmi *k6tv1.VirtualMachineInstance) bool {
if vmi == nil {
return false
}
topologyHints := vmi.Status.TopologyHints
return topologyHints != nil && topologyHints.TSCFrequency != nil && *topologyHints.TSCFrequency > 0
}
func IsManualTSCFrequencyRequired(vmi *k6tv1.VirtualMachineInstance) bool {
return vmi != nil &&
GetTscFrequencyRequirement(vmi).Type != NotRequired &&
AreTSCFrequencyTopologyHintsDefined(vmi)
}
func GetTscFrequencyRequirement(vmi *k6tv1.VirtualMachineInstance) TscFrequencyRequirement {
newRequirement := func(reqType TscFrequencyRequirementType, reason string) TscFrequencyRequirement {
return TscFrequencyRequirement{Type: reqType, Reason: reason}
}
if vmiHasInvTSCFeature(vmi) {
return newRequirement(RequiredForBoot, "VMI with invtsc CPU feature must have tsc frequency defined in order to boot")
}
if isVmiUsingHyperVReenlightenment(vmi) {
return newRequirement(RequiredForMigration, "HyperV Reenlightenment VMIs cannot migrate when TSC Frequency is not exposed on the cluster: guest timers might be inconsistent")
}
return newRequirement(NotRequired, "")
}
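// For example, a VMI whose spec contains the CPU feature
// {name: invtsc, policy: require} yields RequiredForBoot, while a VMI that
// enables spec.domain.features.hyperv.reenlightenment yields
// RequiredForMigration.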
func vmiHasInvTSCFeature(vmi *k6tv1.VirtualMachineInstance) bool {
if cpu := vmi.Spec.Domain.CPU; cpu != nil {
for _, f := range cpu.Features {
if f.Name != "invtsc" {
continue
}
switch f.Policy {
case "require", "force":
return true
}
}
}
return false
}
func isVmiUsingHyperVReenlightenment(vmi *k6tv1.VirtualMachineInstance) bool {
if vmi == nil {
return false
}
domainFeatures := vmi.Spec.Domain.Features
return domainFeatures != nil && domainFeatures.Hyperv != nil && domainFeatures.Hyperv.Reenlightenment != nil &&
domainFeatures.Hyperv.Reenlightenment.Enabled != nil && *domainFeatures.Hyperv.Reenlightenment.Enabled
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package util
import (
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
typesutil "kubevirt.io/kubevirt/pkg/storage/types"
)
func ProcessWorkItem(queue workqueue.TypedRateLimitingInterface[string], handler func(string) (time.Duration, error)) bool {
obj, shutdown := queue.Get()
if shutdown {
return false
}
err := func(key string) error {
defer queue.Done(obj)
if requeueAfter, err := handler(key); requeueAfter > 0 || err != nil {
if requeueAfter > 0 {
queue.AddAfter(key, requeueAfter)
} else {
queue.AddRateLimited(key)
}
return err
}
queue.Forget(obj)
return nil
}(obj)
if err != nil {
utilruntime.HandleError(err)
return true
}
return true
}
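// A minimal usage sketch (the handler here is hypothetical): drain the queue
// until shutdown, requeueing a key after a delay or with rate limiting on error.
//
//	for ProcessWorkItem(queue, func(key string) (time.Duration, error) {
//		// e.g. resync this key in a minute without treating it as a failure
//		return time.Minute, nil
//	}) {
//	}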
func PodsUsingPVCs(podInformer cache.SharedIndexInformer, namespace string, pvcNames sets.String) ([]corev1.Pod, error) {
var pods []corev1.Pod
if pvcNames.Len() < 1 {
return pods, nil
}
objs, err := podInformer.GetIndexer().ByIndex(cache.NamespaceIndex, namespace)
if err != nil {
return nil, err
}
for _, obj := range objs {
pod, ok := obj.(*corev1.Pod)
if !ok {
return nil, fmt.Errorf("expected Pod, got %T", obj)
}
if pod.Status.Phase == corev1.PodSucceeded {
continue
}
for _, volume := range pod.Spec.Volumes {
if volume.VolumeSource.PersistentVolumeClaim != nil &&
pvcNames.Has(volume.PersistentVolumeClaim.ClaimName) {
pods = append(pods, *pod)
}
}
}
return pods, nil
}
func CreateDataVolumeManifest(clientset kubecli.KubevirtClient, dataVolumeTemplate virtv1.DataVolumeTemplateSpec, vm *virtv1.VirtualMachine) (*cdiv1.DataVolume, error) {
newDataVolume, err := typesutil.GenerateDataVolumeFromTemplate(clientset, dataVolumeTemplate, vm.Namespace, vm.Spec.Template.Spec.PriorityClassName)
if err != nil {
return nil, err
}
// Guard against a nil label map before tagging ownership
if newDataVolume.ObjectMeta.Labels == nil {
newDataVolume.ObjectMeta.Labels = map[string]string{}
}
newDataVolume.ObjectMeta.Labels[virtv1.CreatedByLabel] = string(vm.UID)
newDataVolume.ObjectMeta.OwnerReferences = []v1.OwnerReference{
*v1.NewControllerRef(vm, virtv1.VirtualMachineGroupVersionKind),
}
return newDataVolume, nil
}
/*
Copyright The KubeVirt Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vm
import (
"context"
"fmt"
"github.com/google/uuid"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubevirt"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/virt-controller/watch/common"
)
type FirmwareController struct {
clientset kubevirt.Interface
}
const (
firmwareUUIDErrorReason = "FirmwareUUIDError"
)
func NewFirmwareController(clientset kubevirt.Interface) *FirmwareController {
return &FirmwareController{
clientset: clientset,
}
}
func (fc *FirmwareController) Sync(vm *v1.VirtualMachine, _ *v1.VirtualMachineInstance) (*v1.VirtualMachine, error) {
firmware := vm.Spec.Template.Spec.Domain.Firmware
if firmware == nil {
firmware = &v1.Firmware{}
}
if firmware.UUID != "" {
return vm, nil
}
firmware = firmware.DeepCopy()
firmware.UUID = CalculateLegacyUUID(vm.Name)
updatedVM, err := fc.vmFirmwarePatch(firmware, vm)
if err != nil {
return vm, common.NewSyncError(fmt.Errorf("error encountered when trying to patch VM firmware: %w", err), firmwareUUIDErrorReason)
}
return updatedVM, nil
}
func (fc *FirmwareController) vmFirmwarePatch(updatedFirmware *v1.Firmware, vm *v1.VirtualMachine) (*v1.VirtualMachine, error) {
patchBytes, err := patch.New(
patch.WithTest("/spec/template/spec/domain/firmware", vm.Spec.Template.Spec.Domain.Firmware),
patch.WithAdd("/spec/template/spec/domain/firmware", updatedFirmware),
).GeneratePayload()
if err != nil {
return vm, err
}
return fc.clientset.KubevirtV1().
VirtualMachines(vm.Namespace).
Patch(context.Background(), vm.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
}
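// The generated payload is a two-operation JSON patch of roughly this shape:
//
//	[
//	  {"op": "test", "path": "/spec/template/spec/domain/firmware", "value": <current firmware>},
//	  {"op": "add", "path": "/spec/template/spec/domain/firmware", "value": <updated firmware>}
//	]
//
// The leading "test" makes the patch fail if the firmware changed between the
// read and the write, guarding against concurrent modification.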
const magicUUID = "6a1a24a1-4061-4607-8bf4-a3963d0c5895"
var firmwareUUIDns = uuid.MustParse(magicUUID)
func CalculateLegacyUUID(name string) types.UID {
return types.UID(uuid.NewSHA1(firmwareUUIDns, []byte(name)).String())
}
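// CalculateLegacyUUID is deterministic: uuid.NewSHA1 derives a version-5-style
// UUID from the fixed namespace and the VM name, so the same name always maps
// to the same firmware UUID across controller restarts.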
// Copyright 2025 the cncf-fuzzing authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package vm
import (
"encoding/json"
"fmt"
"reflect"
jsonpatch "github.com/evanphx/json-patch"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/testing"
)
// PatchReactor should be used to replace the default patch reactor.
// handle - takes the subresource (e.g. "status") and returns whether the request should be handled
// modify - takes the new and old objects and returns the object that should be stored
func PatchReactor(handle func(string) bool, tracker testing.ObjectTracker,
modify func(new, old runtime.Object) runtime.Object,
) func(action testing.Action) (handled bool, ret runtime.Object, err error) {
return func(action testing.Action) (handled bool, ret runtime.Object, err error) {
if !handle(action.GetSubresource()) {
return false, nil, nil
}
switch action := action.(type) {
case testing.PatchActionImpl:
obj, err := tracker.Get(action.GetResource(), action.GetNamespace(), action.GetName())
if err != nil {
return true, nil, err
}
oldObj, err := tracker.Get(action.GetResource(), action.GetNamespace(), action.GetName())
if err != nil {
return true, nil, err
}
old, err := json.Marshal(obj)
if err != nil {
return true, nil, err
}
// reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields
// in obj that are removed by patch are cleared
value := reflect.ValueOf(obj)
value.Elem().Set(reflect.New(value.Type().Elem()).Elem())
switch action.GetPatchType() {
case types.JSONPatchType:
patch, err := jsonpatch.DecodePatch(action.GetPatch())
if err != nil {
return true, nil, err
}
modified, err := patch.Apply(old)
if err != nil {
return true, nil, err
}
if err = json.Unmarshal(modified, obj); err != nil {
return true, nil, err
}
case types.MergePatchType:
modified, err := jsonpatch.MergePatch(old, action.GetPatch())
if err != nil {
return true, nil, err
}
if err := json.Unmarshal(modified, obj); err != nil {
return true, nil, err
}
case types.StrategicMergePatchType, types.ApplyPatchType:
mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj)
if err != nil {
return true, nil, err
}
if err = json.Unmarshal(mergedByte, obj); err != nil {
return true, nil, err
}
default:
return true, nil, fmt.Errorf("PatchType is not supported")
}
obj = modify(obj, oldObj)
if err = tracker.Update(action.GetResource(), obj, action.GetNamespace()); err != nil {
return true, nil, err
}
return true, obj, nil
default:
panic("Unexpected action implementation")
}
}
}
// Copyright 2025 the cncf-fuzzing authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package vm
import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/testing"
v1 "kubevirt.io/api/core/v1"
)
// SubresourceHandle should be used for status Update/Patch
func SubresourceHandle(subresources string) bool {
return subresources == "status"
}
// Handle should be used for Update/Patch without status
func Handle(subresources string) bool {
return subresources != "status"
}
// ModifyStatusOnlyVM ignores any updates other than to status
func ModifyStatusOnlyVM(new, old runtime.Object) runtime.Object {
vm := new.(*v1.VirtualMachine)
oldVM := old.(*v1.VirtualMachine)
oldVM = oldVM.DeepCopy()
oldVM.Status = *vm.Status.DeepCopy()
return oldVM
}
// ModifyVM ignores updates to status
func ModifyVM(new, old runtime.Object) runtime.Object {
vm := new.(*v1.VirtualMachine)
oldVM := old.(*v1.VirtualMachine)
oldVM = oldVM.DeepCopy()
oldVM.Spec = *vm.Spec.DeepCopy()
oldVM.ObjectMeta = *vm.ObjectMeta.DeepCopy()
return oldVM
}
// UpdateReactor should be used to replace the default update reactor.
// handle - takes the subresource (e.g. "status") and returns whether the request should be handled
// modify - takes the new and old objects and returns the object that should be stored
func UpdateReactor(handle func(string) bool, tracker testing.ObjectTracker,
modify func(new, old runtime.Object) runtime.Object,
) func(action testing.Action) (handled bool, ret runtime.Object, err error) {
return func(action testing.Action) (handled bool, ret runtime.Object, err error) {
if !handle(action.GetSubresource()) {
return false, nil, nil
}
switch action := action.(type) {
case testing.UpdateActionImpl:
objMeta, err := meta.Accessor(action.GetObject())
if err != nil {
return true, nil, err
}
oldObj, err := tracker.Get(action.GetResource(), action.GetNamespace(), objMeta.GetName())
if err != nil {
return true, nil, err
}
modifiedObj := modify(action.GetObject(), oldObj)
err = tracker.Update(action.GetResource(), modifiedObj, action.GetNamespace())
if err != nil {
return true, nil, err
}
oldObj, err = tracker.Get(action.GetResource(), action.GetNamespace(), objMeta.GetName())
return true, oldObj, err
default:
panic("Unexpected action implementation")
}
}
}
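// A minimal wiring sketch for a client-go style fake clientset (the fake
// client variable names are illustrative): route status updates through
// ModifyStatusOnlyVM and everything else through ModifyVM.
//
//	tracker := fakeClient.Tracker()
//	fakeClient.PrependReactor("update", "virtualmachines",
//		UpdateReactor(SubresourceHandle, tracker, ModifyStatusOnlyVM))
//	fakeClient.PrependReactor("patch", "virtualmachines",
//		PatchReactor(Handle, tracker, ModifyVM))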
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package vm
import (
"context"
"encoding/json"
"errors"
"fmt"
"maps"
"math"
"math/rand"
"strconv"
"strings"
"time"
"kubevirt.io/kubevirt/pkg/instancetype/revision"
"kubevirt.io/kubevirt/pkg/libvmi"
"kubevirt.io/kubevirt/pkg/liveupdate/memory"
"kubevirt.io/kubevirt/pkg/pointer"
netadmitter "kubevirt.io/kubevirt/pkg/network/admitter"
netvmispec "kubevirt.io/kubevirt/pkg/network/vmispec"
netvmliveupdate "kubevirt.io/kubevirt/pkg/network/vmliveupdate"
"kubevirt.io/kubevirt/pkg/virt-controller/watch/common"
watchutil "kubevirt.io/kubevirt/pkg/virt-controller/watch/util"
appsv1 "k8s.io/api/apps/v1"
authv1 "k8s.io/api/authorization/v1"
k8score "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
apiErrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/utils/trace"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/controller"
"kubevirt.io/kubevirt/pkg/storage/cbt"
storagehotplug "kubevirt.io/kubevirt/pkg/storage/hotplug"
"kubevirt.io/kubevirt/pkg/storage/memorydump"
storagetypes "kubevirt.io/kubevirt/pkg/storage/types"
"kubevirt.io/kubevirt/pkg/util"
"kubevirt.io/kubevirt/pkg/util/hardware"
"kubevirt.io/kubevirt/pkg/util/migrations"
traceUtils "kubevirt.io/kubevirt/pkg/util/trace"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/pkg/virt-controller/watch/descheduler"
volumemig "kubevirt.io/kubevirt/pkg/virt-controller/watch/volume-migration"
)
const (
fetchingRunStrategyErrFmt = "Error fetching RunStrategy: %v"
fetchingVMKeyErrFmt = "Error fetching vmKey: %v"
startingVMIFailureFmt = "Failure while starting VMI: %v"
nonReceiverVMI = "Found non receiver VMI while VM is receiver"
)
type CloneAuthFunc func(dv *cdiv1.DataVolume, requestNamespace, requestName string, proxy cdiv1.AuthorizationHelperProxy, saNamespace, saName string) (bool, string, error)
// Repeating info / error messages
const (
stoppingVmMsg = "Stopping VM"
startingVmMsg = "Starting VM"
startingVmReceiverMsg = "Starting VM as receiver"
failedExtractVmkeyFromVmErrMsg = "Failed to extract vmKey from VirtualMachine."
failedCreateCRforVmErrMsg = "Failed to create controller revision for VirtualMachine."
failedProcessDeleteNotificationErrMsg = "Failed to process delete notification"
failureDeletingVmiErrFormat = "Failure attempting to delete VMI: %v"
failedManualRecoveryRequiredCondSetErrMsg = "cannot start the VM since it has the manual recovery required condition set"
// UnauthorizedDataVolumeCreateReason is added in an event when the DataVolume
// ServiceAccount doesn't have permission to create a DataVolume
UnauthorizedDataVolumeCreateReason = "UnauthorizedDataVolumeCreate"
// FailedDataVolumeCreateReason is added in an event when posting a dynamically
// generated dataVolume to the cluster fails.
FailedDataVolumeCreateReason = "FailedDataVolumeCreate"
// SuccessfulDataVolumeCreateReason is added in an event when a dynamically generated
// dataVolume is successfully created
SuccessfulDataVolumeCreateReason = "SuccessfulDataVolumeCreate"
// SourcePVCNotAvailabe is added in an event when the source PVC of a valid
// clone DataVolume doesn't exist
SourcePVCNotAvailabe = "SourcePVCNotAvailabe"
)
const (
hotplugVolumeErrorReason = "HotPlugVolumeError"
hotplugCPUErrorReason = "HotPlugCPUError"
failedUpdateErrorReason = "FailedUpdateError"
failedCreateReason = "FailedCreate"
vmiFailedDeleteReason = "FailedDelete"
affinityChangeErrorReason = "AffinityChangeError"
hotplugMemoryErrorReason = "HotPlugMemoryError"
volumesUpdateErrorReason = "VolumesUpdateError"
tolerationsChangeErrorReason = "TolerationsChangeError"
annotationsLabelsChangeErrorReason = "AnnotationsLabelsChangeError"
)
const defaultMaxCrashLoopBackoffDelaySeconds = 300
func NewController(vmiInformer cache.SharedIndexInformer,
vmInformer cache.SharedIndexInformer,
dataVolumeInformer cache.SharedIndexInformer,
dataSourceInformer cache.SharedIndexInformer,
kubeVirtInformer cache.SharedIndexInformer,
namespaceInformer cache.SharedIndexInformer,
pvcInformer cache.SharedIndexInformer,
crInformer cache.SharedIndexInformer,
recorder record.EventRecorder,
clientset kubecli.KubevirtClient,
clusterConfig *virtconfig.ClusterConfig,
netSynchronizer synchronizer,
firmwareSynchronizer synchronizer,
instancetypeController instancetypeHandler,
additionalLauncherAnnotationsSync []string,
additionalLauncherLabelsSync []string,
) (*Controller, error) {
c := &Controller{
Queue: workqueue.NewTypedRateLimitingQueueWithConfig[string](
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-controller-vm"},
),
vmiIndexer: vmiInformer.GetIndexer(),
vmIndexer: vmInformer.GetIndexer(),
dataVolumeStore: dataVolumeInformer.GetStore(),
dataSourceStore: dataSourceInformer.GetStore(),
namespaceStore: namespaceInformer.GetStore(),
pvcStore: pvcInformer.GetStore(),
crIndexer: crInformer.GetIndexer(),
instancetypeController: instancetypeController,
recorder: recorder,
clientset: clientset,
expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
dataVolumeExpectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
cloneAuthFunc: func(dv *cdiv1.DataVolume, requestNamespace, requestName string, proxy cdiv1.AuthorizationHelperProxy, saNamespace, saName string) (bool, string, error) {
response, err := dv.AuthorizeSA(requestNamespace, requestName, proxy, saNamespace, saName)
return response.Allowed, response.Reason, err
},
clusterConfig: clusterConfig,
netSynchronizer: netSynchronizer,
firmwareSynchronizer: firmwareSynchronizer,
additionalLauncherAnnotationsSync: additionalLauncherAnnotationsSync,
additionalLauncherLabelsSync: additionalLauncherLabelsSync,
}
c.hasSynced = func() bool {
return vmiInformer.HasSynced() && vmInformer.HasSynced() &&
dataVolumeInformer.HasSynced() && dataSourceInformer.HasSynced() &&
pvcInformer.HasSynced() && crInformer.HasSynced()
}
_, err := vmInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addVirtualMachine,
DeleteFunc: c.deleteVirtualMachine,
UpdateFunc: c.updateVirtualMachine,
})
if err != nil {
return nil, err
}
_, err = vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addVirtualMachineInstance,
DeleteFunc: c.deleteVirtualMachineInstance,
UpdateFunc: c.updateVirtualMachineInstance,
})
if err != nil {
return nil, err
}
_, err = dataVolumeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addDataVolume,
DeleteFunc: c.deleteDataVolume,
UpdateFunc: c.updateDataVolume,
})
if err != nil {
return nil, err
}
_, err = pvcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addPVC,
})
if err != nil {
return nil, err
}
_, err = kubeVirtInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: c.handleKubeVirtUpdate,
})
if err != nil {
return nil, err
}
_, err = namespaceInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: c.handleNamespaceUpdate,
})
if err != nil {
return nil, err
}
return c, nil
}
type authProxy struct {
client kubecli.KubevirtClient
dataSourceStore cache.Store
namespaceStore cache.Store
}
func (p *authProxy) CreateSar(sar *authv1.SubjectAccessReview) (*authv1.SubjectAccessReview, error) {
return p.client.AuthorizationV1().SubjectAccessReviews().Create(context.Background(), sar, metav1.CreateOptions{})
}
func (p *authProxy) GetNamespace(name string) (*k8score.Namespace, error) {
obj, exists, err := p.namespaceStore.GetByKey(name)
if err != nil {
return nil, err
} else if !exists {
return nil, fmt.Errorf("namespace %s does not exist", name)
}
ns := obj.(*k8score.Namespace).DeepCopy()
return ns, nil
}
func (p *authProxy) GetDataSource(namespace, name string) (*cdiv1.DataSource, error) {
key := fmt.Sprintf("%s/%s", namespace, name)
obj, exists, err := p.dataSourceStore.GetByKey(key)
if err != nil {
return nil, err
} else if !exists {
return nil, fmt.Errorf("dataSource %s does not exist", key)
}
ds := obj.(*cdiv1.DataSource).DeepCopy()
return ds, nil
}
type synchronizer interface {
Sync(*virtv1.VirtualMachine, *virtv1.VirtualMachineInstance) (*virtv1.VirtualMachine, error)
}
type instancetypeHandler interface {
synchronizer
ApplyToVM(*virtv1.VirtualMachine) error
ApplyToVMI(*virtv1.VirtualMachine, *virtv1.VirtualMachineInstance) error
ApplyAutoAttachPreferences(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) error
}
type Controller struct {
clientset kubecli.KubevirtClient
Queue workqueue.TypedRateLimitingInterface[string]
vmiIndexer cache.Indexer
vmIndexer cache.Indexer
dataVolumeStore cache.Store
dataSourceStore cache.Store
namespaceStore cache.Store
pvcStore cache.Store
crIndexer cache.Indexer
instancetypeController instancetypeHandler
recorder record.EventRecorder
expectations *controller.UIDTrackingControllerExpectations
dataVolumeExpectations *controller.UIDTrackingControllerExpectations
cloneAuthFunc CloneAuthFunc
clusterConfig *virtconfig.ClusterConfig
hasSynced func() bool
netSynchronizer synchronizer
firmwareSynchronizer synchronizer
additionalLauncherAnnotationsSync []string
additionalLauncherLabelsSync []string
}
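// Run starts the controller. It waits for the informer caches to sync,
// then launches the requested number of worker goroutines and blocks
// until stopCh is closed.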
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) {
defer controller.HandlePanic()
defer c.Queue.ShutDown()
log.Log.Info("Starting VirtualMachine controller.")
// Wait for cache sync before we start the controller
cache.WaitForCacheSync(stopCh, c.hasSynced)
// Start the actual work
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
log.Log.Info("Stopping VirtualMachine controller.")
}
func (c *Controller) runWorker() {
for c.Execute() {
}
}
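// satisfiedExpectations reports whether all pending VMI and DataVolume
// create/delete expectations for the given VM key have been observed,
// i.e. whether the informer caches are current enough to reconcile safely.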
func (c *Controller) satisfiedExpectations(key string) bool {
return c.expectations.SatisfiedExpectations(key) && c.dataVolumeExpectations.SatisfiedExpectations(key)
}
var virtControllerVMWorkQueueTracer = &traceUtils.Tracer{Threshold: time.Second}
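// Execute processes a single key from the work queue. It returns false only
// when the queue is shutting down. Keys that fail are re-enqueued with rate
// limiting; successfully processed keys are forgotten.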
func (c *Controller) Execute() bool {
key, quit := c.Queue.Get()
if quit {
return false
}
virtControllerVMWorkQueueTracer.StartTrace(key, "virt-controller VM workqueue", trace.Field{Key: "Workqueue Key", Value: key})
defer virtControllerVMWorkQueueTracer.StopTrace(key)
defer c.Queue.Done(key)
if err := c.execute(key); err != nil {
log.Log.Reason(err).Infof("re-enqueuing VirtualMachine %v", key)
c.Queue.AddRateLimited(key)
} else {
log.Log.V(4).Infof("processed VirtualMachine %v", key)
c.Queue.Forget(key)
}
return true
}
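// execute runs one reconciliation pass for the VM identified by key: it
// resolves the VM from the cache, persists the latest API version annotation
// if needed, claims the matching VMI and DataVolumes via the controller ref
// manager, and finally delegates to sync and updateStatus.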
func (c *Controller) execute(key string) error {
obj, exists, err := c.vmIndexer.GetByKey(key)
if err != nil {
return nil
}
if !exists {
// nothing we need to do. It should always be possible to re-create this type of controller
c.expectations.DeleteExpectations(key)
return nil
}
originalVM := obj.(*virtv1.VirtualMachine)
vm := originalVM.DeepCopy()
logger := log.Log.Object(vm)
logger.V(4).Info("Started processing vm")
// This must be the first step in execution. Writing the object
// when the api version changes ensures our stored api version is updated.
if !controller.ObservedLatestApiVersionAnnotation(vm) {
controller.SetLatestApiVersionAnnotation(vm)
_, err = c.clientset.VirtualMachine(vm.Namespace).Update(context.Background(), vm, metav1.UpdateOptions{})
if err != nil {
logger.Reason(err).Error("Updating api version annotations failed")
}
return err
}
vmKey, err := controller.KeyFunc(vm)
if err != nil {
return err
}
// If any adoptions are attempted, we should first recheck for deletion with
// an uncached quorum read sometime after listing VirtualMachines (see kubernetes/kubernetes#42639).
canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
fresh, err := c.clientset.VirtualMachine(vm.ObjectMeta.Namespace).Get(context.Background(), vm.ObjectMeta.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if fresh.ObjectMeta.UID != vm.ObjectMeta.UID {
return nil, fmt.Errorf("original VirtualMachine %v/%v is gone: got uid %v, wanted %v", vm.Namespace, vm.Name, fresh.UID, vm.UID)
}
return fresh, nil
})
cm := controller.NewVirtualMachineControllerRefManager(
controller.RealVirtualMachineControl{
Clientset: c.clientset,
}, vm, nil, virtv1.VirtualMachineGroupVersionKind, canAdoptFunc)
var vmi *virtv1.VirtualMachineInstance
vmiObj, exist, err := c.vmiIndexer.GetByKey(vmKey)
if err != nil {
logger.Reason(err).Error("Failed to fetch vmi for namespace from cache.")
return err
}
if !exist {
logger.V(4).Infof("VirtualMachineInstance not found in cache %s", key)
vmi = nil
} else {
vmi = vmiObj.(*virtv1.VirtualMachineInstance)
vmi, err = cm.ClaimVirtualMachineInstanceByName(vmi)
if err != nil {
return err
}
}
dataVolumes, err := storagetypes.ListDataVolumesFromTemplates(vm.Namespace, vm.Spec.DataVolumeTemplates, c.dataVolumeStore)
if err != nil {
logger.Reason(err).Error("Failed to fetch dataVolumes for namespace from cache.")
return err
}
if len(dataVolumes) != 0 {
dataVolumes, err = cm.ClaimMatchedDataVolumes(dataVolumes)
if err != nil {
return err
}
}
var syncErr common.SyncError
vm, vmi, syncErr, err = c.sync(vm, vmi, key)
if err != nil {
return err
}
if syncErr != nil {
logger.Reason(syncErr).Error("Reconciling the VirtualMachine failed.")
}
err = c.updateStatus(vm, originalVM, vmi, syncErr, logger)
if err != nil {
logger.Reason(err).Error("Updating the VirtualMachine status failed.")
return err
}
return syncErr
}
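// handleCloneDataVolume validates requirements that only apply to clone
// DataVolumes: sourceRef is rejected, a missing source PVC triggers a warning
// event, and the clone must pass the authorization check.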
func (c *Controller) handleCloneDataVolume(vm *virtv1.VirtualMachine, dv *cdiv1.DataVolume) error {
if dv.Spec.SourceRef != nil {
return fmt.Errorf("DataVolume sourceRef not supported")
}
if dv.Spec.Source == nil {
return nil
}
// For consistency with other k8s objects, we allow creating clone DataVolumes even when the source PVC doesn't exist.
// This means that a VirtualMachine can be successfully created with volumes that may remain unpopulated until the source PVC is created.
// For this reason, we check if the source PVC exists and, if not, we trigger an event to let users know of this behavior.
if dv.Spec.Source.PVC != nil {
// TODO: a lot of CDI knowledge, maybe an API to check if source exists?
pvc, err := storagetypes.GetPersistentVolumeClaimFromCache(dv.Spec.Source.PVC.Namespace, dv.Spec.Source.PVC.Name, c.pvcStore)
if err != nil {
return err
}
if pvc == nil {
c.recorder.Eventf(vm, k8score.EventTypeWarning, SourcePVCNotAvailabe, "Source PVC %s not available: Target PVC %s will remain unpopulated until source is created", dv.Spec.Source.PVC.Name, dv.Name)
}
}
if err := c.authorizeDataVolume(vm, dv); err != nil {
c.recorder.Eventf(vm, k8score.EventTypeWarning, UnauthorizedDataVolumeCreateReason, "Not authorized to create DataVolume %s: %v", dv.Name, err)
return fmt.Errorf("not authorized to create DataVolume: %v", err)
}
return nil
}
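// authorizeDataVolume checks whether the service account referenced by the VM
// template's volumes (falling back to "default" when none is present) is
// authorized to clone the DataVolume's source.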
func (c *Controller) authorizeDataVolume(vm *virtv1.VirtualMachine, dataVolume *cdiv1.DataVolume) error {
serviceAccountName := "default"
for _, vol := range vm.Spec.Template.Spec.Volumes {
if vol.ServiceAccount != nil {
serviceAccountName = vol.ServiceAccount.ServiceAccountName
}
}
proxy := &authProxy{client: c.clientset, dataSourceStore: c.dataSourceStore, namespaceStore: c.namespaceStore}
allowed, reason, err := c.cloneAuthFunc(dataVolume, vm.Namespace, dataVolume.Name, proxy, vm.Namespace, serviceAccountName)
if err != nil && err != cdiv1.ErrNoTokenOkay {
return err
}
if !allowed {
return fmt.Errorf("%s", reason)
}
return nil
}
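// handleDataVolumes creates any DataVolumes declared in the VM's
// dataVolumeTemplates that do not exist yet. It returns true only when every
// DataVolume exists and has succeeded or is waiting for its first consumer.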
func (c *Controller) handleDataVolumes(vm *virtv1.VirtualMachine) (bool, error) {
ready := true
vmKey, err := controller.KeyFunc(vm)
if err != nil {
return ready, err
}
for _, template := range vm.Spec.DataVolumeTemplates {
curDataVolume, err := storagetypes.GetDataVolumeFromCache(vm.Namespace, template.Name, c.dataVolumeStore)
if err != nil {
return false, err
}
if curDataVolume == nil {
// Look up any pre-existing PVC with the same name; it is used below to
// tolerate "already exists" errors from CDI versions without claim adoption
pvc, err := storagetypes.GetPersistentVolumeClaimFromCache(vm.Namespace, template.Name, c.pvcStore)
if err != nil {
return false, err
}
// ready = false because we encountered a DataVolume that has not been created yet
ready = false
newDataVolume, err := watchutil.CreateDataVolumeManifest(c.clientset, template, vm)
if err != nil {
return ready, fmt.Errorf("unable to create DataVolume manifest: %v", err)
}
// We validate requirements that are exclusive to clone DataVolumes
if err = c.handleCloneDataVolume(vm, newDataVolume); err != nil {
return ready, err
}
c.dataVolumeExpectations.ExpectCreations(vmKey, 1)
curDataVolume, err = c.clientset.CdiClient().CdiV1beta1().DataVolumes(vm.Namespace).Create(context.Background(), newDataVolume, metav1.CreateOptions{})
if err != nil {
c.dataVolumeExpectations.CreationObserved(vmKey)
if pvc != nil && strings.Contains(err.Error(), "already exists") {
// If the PVC already exists, we can ignore the error and continue
// probably old version of CDI
log.Log.Object(vm).Reason(err).Warning("Appear to be running a version of CDI that does not support claim adoption annotation")
continue
}
c.recorder.Eventf(vm, k8score.EventTypeWarning, FailedDataVolumeCreateReason, "Error creating DataVolume %s: %v", newDataVolume.Name, err)
return ready, fmt.Errorf("failed to create DataVolume: %v", err)
}
c.recorder.Eventf(vm, k8score.EventTypeNormal, SuccessfulDataVolumeCreateReason, "Created DataVolume %s", curDataVolume.Name)
} else {
switch curDataVolume.Status.Phase {
case cdiv1.Succeeded, cdiv1.WaitForFirstConsumer, cdiv1.PendingPopulation:
continue
case cdiv1.Failed:
c.recorder.Eventf(vm, k8score.EventTypeWarning, controller.FailedDataVolumeImportReason, "DataVolume %s failed to import disk image", curDataVolume.Name)
case cdiv1.Pending:
if err := storagetypes.HasDataVolumeExceededQuotaError(curDataVolume); err != nil {
c.recorder.Eventf(vm, k8score.EventTypeWarning, controller.FailedDataVolumeImportReason, "DataVolume %s exceeds quota limits", curDataVolume.Name)
return false, err
}
}
// ready = false because we encountered a DataVolume that is not populated yet
ready = false
}
}
return ready, nil
}
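// VMICPUsPatch hot-plugs CPUs by submitting a JSON patch against the running
// VMI. Each replace is guarded by a test operation so the patch fails if the
// VMI changed concurrently. When CPU requests or limits are set, they are
// scaled by the vCPU delta divided by the cluster CPU allocation ratio.
// The generated payload has the following shape (illustrative values only):
//
//	[
//	  {"op": "test", "path": "/spec/domain/cpu/sockets", "value": 2},
//	  {"op": "replace", "path": "/spec/domain/cpu/sockets", "value": 4}
//	]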
func (c *Controller) VMICPUsPatch(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) error {
patchSet := patch.New(
patch.WithTest("/spec/domain/cpu/sockets", vmi.Spec.Domain.CPU.Sockets),
patch.WithReplace("/spec/domain/cpu/sockets", vm.Spec.Template.Spec.Domain.CPU.Sockets),
)
vcpusDelta := hardware.GetNumberOfVCPUs(vm.Spec.Template.Spec.Domain.CPU) - hardware.GetNumberOfVCPUs(vmi.Spec.Domain.CPU)
resourcesDelta := resource.NewMilliQuantity(vcpusDelta*int64(1000/c.clusterConfig.GetCPUAllocationRatio()), resource.DecimalSI)
logMsg := fmt.Sprintf("hotplugging cpu to %v sockets", vm.Spec.Template.Spec.Domain.CPU.Sockets)
if !vm.Spec.Template.Spec.Domain.Resources.Requests.Cpu().IsZero() {
newCpuReq := vmi.Spec.Domain.Resources.Requests.Cpu().DeepCopy()
newCpuReq.Add(*resourcesDelta)
patchSet.AddOption(
patch.WithTest("/spec/domain/resources/requests/cpu", vmi.Spec.Domain.Resources.Requests.Cpu().String()),
patch.WithReplace("/spec/domain/resources/requests/cpu", newCpuReq.String()),
)
logMsg = fmt.Sprintf("%s, setting requests to %s", logMsg, newCpuReq.String())
}
if !vm.Spec.Template.Spec.Domain.Resources.Limits.Cpu().IsZero() {
newCpuLimit := vmi.Spec.Domain.Resources.Limits.Cpu().DeepCopy()
newCpuLimit.Add(*resourcesDelta)
patchSet.AddOption(
patch.WithTest("/spec/domain/resources/limits/cpu", vmi.Spec.Domain.Resources.Limits.Cpu().String()),
patch.WithReplace("/spec/domain/resources/limits/cpu", newCpuLimit.String()),
)
logMsg = fmt.Sprintf("%s, setting limits to %s", logMsg, newCpuLimit.String())
}
patchBytes, err := patchSet.GeneratePayload()
if err != nil {
return err
}
_, err = c.clientset.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType, patchBytes, v1.PatchOptions{})
if err == nil {
log.Log.Object(vmi).Infof("%s", logMsg)
}
return err
}
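// handleCPUChangeRequest reconciles a change of CPU sockets between the VM
// template and the running VMI. Hotplug is attempted only when no other CPU
// hotplug is in progress and the VMI is not migrating; socket reductions,
// values above MaxSockets, and ARM64 guests set the RestartRequired
// condition instead.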
func (c *Controller) handleCPUChangeRequest(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) error {
if vmi == nil || vmi.DeletionTimestamp != nil {
return nil
}
vmCopyWithInstancetype := vm.DeepCopy()
if err := c.instancetypeController.ApplyToVM(vmCopyWithInstancetype); err != nil {
return err
}
if vmCopyWithInstancetype.Spec.Template.Spec.Domain.CPU == nil || vmi.Spec.Domain.CPU == nil {
return nil
}
if vmCopyWithInstancetype.Spec.Template.Spec.Domain.CPU.Sockets == vmi.Spec.Domain.CPU.Sockets {
return nil
}
vmiConditions := controller.NewVirtualMachineInstanceConditionManager()
if vmiConditions.HasConditionWithStatus(vmi, virtv1.VirtualMachineInstanceVCPUChange, k8score.ConditionTrue) {
return fmt.Errorf("another CPU hotplug is in progress")
}
if migrations.IsMigrating(vmi) {
return fmt.Errorf("CPU hotplug is not allowed while VMI is migrating")
}
// If the following is true, MaxSockets was calculated, not manually specified (or the validation webhook would have rejected the change).
// Since we're here, we can also assume MaxSockets was not changed in the VM spec since last boot.
// Therefore, bumping Sockets to a value higher than MaxSockets is fine, it just requires a reboot.
if vmCopyWithInstancetype.Spec.Template.Spec.Domain.CPU.Sockets > vmi.Spec.Domain.CPU.MaxSockets {
setRestartRequired(vm, "CPU sockets updated in template spec to a value higher than what's available")
return nil
}
if vmCopyWithInstancetype.Spec.Template.Spec.Domain.CPU.Sockets < vmi.Spec.Domain.CPU.Sockets {
setRestartRequired(vm, "Reduction of CPU socket count requires a restart")
return nil
}
if virtconfig.IsARM64(vm.Spec.Template.Spec.Architecture) {
setRestartRequired(vm, "ARM doesn't support CPU hotplug")
return nil
}
if err := c.VMICPUsPatch(vmCopyWithInstancetype, vmi); err != nil {
log.Log.Object(vmi).Errorf("unable to patch vmi to add cpu topology status: %v", err)
return err
}
return nil
}
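// VMNodeSelectorPatch aligns the VMI's node selector with the VM template,
// adding, replacing (guarded by a test operation), or removing
// /spec/nodeSelector as appropriate.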
func (c *Controller) VMNodeSelectorPatch(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) error {
patchset := patch.New()
if vm.Spec.Template.Spec.NodeSelector != nil {
vmNodeSelector := maps.Clone(vm.Spec.Template.Spec.NodeSelector)
if vmNodeSelector == nil {
vmNodeSelector = make(map[string]string)
}
if vmi.Spec.NodeSelector == nil {
patchset.AddOption(patch.WithAdd("/spec/nodeSelector", vmNodeSelector))
} else {
patchset.AddOption(
patch.WithTest("/spec/nodeSelector", vmi.Spec.NodeSelector),
patch.WithReplace("/spec/nodeSelector", vmNodeSelector))
}
} else {
patchset.AddOption(patch.WithRemove("/spec/nodeSelector"))
}
generatedPatch, err := patchset.GeneratePayload()
if err != nil {
return err
}
_, err = c.clientset.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType, generatedPatch, metav1.PatchOptions{})
return err
}
func (c *Controller) VMIAffinityPatch(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) error {
patchset := patch.New()
if vm.Spec.Template.Spec.Affinity != nil {
if vmi.Spec.Affinity == nil {
patchset.AddOption(patch.WithAdd("/spec/affinity", vm.Spec.Template.Spec.Affinity))
} else {
patchset.AddOption(
patch.WithTest("/spec/affinity", vmi.Spec.Affinity),
patch.WithReplace("/spec/affinity", vm.Spec.Template.Spec.Affinity))
}
} else {
patchset.AddOption(patch.WithRemove("/spec/affinity"))
}
generatedPatch, err := patchset.GeneratePayload()
if err != nil {
return err
}
_, err = c.clientset.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType, generatedPatch, metav1.PatchOptions{})
return err
}
func (c *Controller) vmiTolerationsPatch(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) error {
patchset := patch.New()
if vm.Spec.Template.Spec.Tolerations != nil {
if vmi.Spec.Tolerations == nil {
patchset.AddOption(patch.WithAdd("/spec/tolerations", vm.Spec.Template.Spec.Tolerations))
} else {
patchset.AddOption(
patch.WithTest("/spec/tolerations", vmi.Spec.Tolerations),
patch.WithReplace("/spec/tolerations", vm.Spec.Template.Spec.Tolerations))
}
} else {
patchset.AddOption(patch.WithRemove("/spec/tolerations"))
}
generatedPatch, err := patchset.GeneratePayload()
if err != nil {
return err
}
_, err = c.clientset.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType, generatedPatch, metav1.PatchOptions{})
return err
}
func (c *Controller) handleTolerationsChangeRequest(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) error {
if vmi == nil || vmi.DeletionTimestamp != nil {
return nil
}
vmCopyWithInstancetype := vm.DeepCopy()
if err := c.instancetypeController.ApplyToVM(vmCopyWithInstancetype); err != nil {
return err
}
if equality.Semantic.DeepEqual(vmCopyWithInstancetype.Spec.Template.Spec.Tolerations, vmi.Spec.Tolerations) {
return nil
}
if migrations.IsMigrating(vmi) {
return fmt.Errorf("tolerations should not be changed during VMI migration")
}
if err := c.vmiTolerationsPatch(vmCopyWithInstancetype, vmi); err != nil {
log.Log.Object(vmi).Errorf("unable to patch vmi to update tolerations: %v", err)
return err
}
return nil
}
func (c *Controller) handleAffinityChangeRequest(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) error {
if vmi == nil || vmi.DeletionTimestamp != nil {
return nil
}
vmCopyWithInstancetype := vm.DeepCopy()
if err := c.instancetypeController.ApplyToVM(vmCopyWithInstancetype); err != nil {
return err
}
hasNodeSelectorChanged := !equality.Semantic.DeepEqual(vmCopyWithInstancetype.Spec.Template.Spec.NodeSelector, vmi.Spec.NodeSelector)
hasNodeAffinityChanged := !equality.Semantic.DeepEqual(vmCopyWithInstancetype.Spec.Template.Spec.Affinity, vmi.Spec.Affinity)
if migrations.IsMigrating(vmi) && (hasNodeSelectorChanged || hasNodeAffinityChanged) {
return fmt.Errorf("Node affinity should not be changed during VMI migration")
}
if hasNodeAffinityChanged {
if err := c.VMIAffinityPatch(vmCopyWithInstancetype, vmi); err != nil {
log.Log.Object(vmi).Errorf("unable to patch vmi to update node affinity: %v", err)
return err
}
}
if hasNodeSelectorChanged {
if err := c.VMNodeSelectorPatch(vmCopyWithInstancetype, vmi); err != nil {
log.Log.Object(vmi).Errorf("unable to patch vmi to update node selector: %v", err)
return err
}
}
return nil
}
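// handleVolumeRequests applies pending add/remove volume requests to the VM
// template and mirrors them onto the live VMI via the AddVolume/RemoveVolume
// subresources, skipping requests the VMI already reflects.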
func (c *Controller) handleVolumeRequests(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) error {
if len(vm.Status.VolumeRequests) == 0 {
return nil
}
vmiVolumeMap := make(map[string]virtv1.Volume)
if vmi != nil {
for _, volume := range vmi.Spec.Volumes {
vmiVolumeMap[volume.Name] = volume
}
}
for i, request := range vm.Status.VolumeRequests {
vm.Spec.Template.Spec = *controller.ApplyVolumeRequestOnVMISpec(&vm.Spec.Template.Spec, &vm.Status.VolumeRequests[i])
if vmi == nil || vmi.DeletionTimestamp != nil {
continue
}
if request.AddVolumeOptions != nil {
if _, exists := vmiVolumeMap[request.AddVolumeOptions.Name]; exists {
continue
}
if err := c.clientset.VirtualMachineInstance(vmi.Namespace).AddVolume(context.Background(), vmi.Name, request.AddVolumeOptions); err != nil {
return err
}
} else if request.RemoveVolumeOptions != nil {
if _, exists := vmiVolumeMap[request.RemoveVolumeOptions.Name]; !exists {
continue
}
if err := c.clientset.VirtualMachineInstance(vmi.Namespace).RemoveVolume(context.Background(), vmi.Name, request.RemoveVolumeOptions); err != nil {
return err
}
}
}
return nil
}
func (c *Controller) handleValidationErrors(err error, vmi *virtv1.VirtualMachineInstance, vm *virtv1.VirtualMachine) error {
if errors.Is(err, storagetypes.ErrPVCNotFound) || errors.Is(err, storagetypes.ErrDVNotFound) {
msg := fmt.Sprintf("One of the destination volumes doesn't exist: %v", err)
log.Log.Object(vm).Error(msg)
if err := volumemig.SetVolumesChangeCondition(c.clientset, vmi, k8score.ConditionFalse, msg); err != nil {
return err
}
return nil
}
setRestartRequired(vm, err.Error())
log.Log.Object(vm).Errorf("cannot migrate the VM. Volumes are invalid: %v", err)
return nil
}
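// handleVolumeUpdateRequest reconciles the updateVolumesStrategy. For the
// migration strategy it validates the new volume set, records the migrated
// volumes in the VMI status and in the VM's VolumeUpdateState, and patches
// the VMI volumes; updates that cannot be migrated set the RestartRequired
// condition instead.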
func (c *Controller) handleVolumeUpdateRequest(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) error {
if vmi == nil {
return nil
}
// Abort the volume migration if any of the previously migrated volumes
// have changed
if volMigAbort, err := volumemig.VolumeMigrationCancel(c.clientset, vmi, vm); volMigAbort {
if err == nil {
log.Log.Object(vm).Infof("Cancel volume migration")
}
return err
}
switch {
case vm.Spec.UpdateVolumesStrategy == nil ||
*vm.Spec.UpdateVolumesStrategy == virtv1.UpdateVolumesStrategyReplacement:
log.Log.Object(vm).V(4).Infof("not handling replacement update volumes strategy")
case *vm.Spec.UpdateVolumesStrategy == virtv1.UpdateVolumesStrategyMigration:
if !volumemig.PersistentVolumesUpdated(&vm.Spec.Template.Spec, &vmi.Spec) {
log.Log.Object(vm).V(4).Infof("No persistent volumes updated")
return nil
}
// Validate if the update volumes can be migrated
if err := volumemig.ValidateVolumes(vmi, vm, c.dataVolumeStore, c.pvcStore); err != nil {
return c.handleValidationErrors(err, vmi, vm)
} else if err := volumemig.UnsetVolumeChangeCondition(c.clientset, vmi); err != nil {
return err
}
migVols, err := volumemig.GenerateMigratedVolumes(c.pvcStore, vmi, vm)
if err != nil {
log.Log.Object(vm).Errorf("failed to generate the migrating volumes for vm: %v", err)
return err
}
if err := volumemig.ValidateVolumesUpdateMigration(vmi, vm, migVols); err != nil {
log.Log.Object(vm).Errorf("cannot migrate the VMI: %v", err)
setRestartRequired(vm, err.Error())
return nil
}
if err := volumemig.PatchVMIStatusWithMigratedVolumes(c.clientset, migVols, vmi); err != nil {
log.Log.Object(vm).Errorf("failed to update migrating volumes for vmi:%v", err)
return err
}
log.Log.Object(vm).Infof("Updated migrating volumes in the status")
if _, err := volumemig.PatchVMIVolumes(c.clientset, vmi, vm); err != nil {
log.Log.Object(vm).Errorf("failed to update volumes for vmi:%v", err)
return err
}
log.Log.Object(vm).Infof("Updated volumes for vmi")
if vm.Status.VolumeUpdateState == nil {
vm.Status.VolumeUpdateState = &virtv1.VolumeUpdateState{}
}
if len(migVols) > 0 {
vm.Status.VolumeUpdateState.VolumeMigrationState = &virtv1.VolumeMigrationState{
MigratedVolumes: migVols,
}
}
default:
return fmt.Errorf("updateVolumes strategy not recognized: %s", *vm.Spec.UpdateVolumesStrategy)
}
return nil
}
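// addStartRequest appends a StartRequest to the VM's stateChangeRequests via
// a status patch and mirrors the patched status back into the local object.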
func (c *Controller) addStartRequest(vm *virtv1.VirtualMachine) error {
desiredStateChangeRequests := append(vm.Status.StateChangeRequests, virtv1.VirtualMachineStateChangeRequest{Action: virtv1.StartRequest})
patchSet := patch.New()
patchSet.AddOption(patch.WithAdd("/status/stateChangeRequests", desiredStateChangeRequests))
patchBytes, err := patchSet.GeneratePayload()
if err != nil {
return err
}
patchedVM, err := c.clientset.VirtualMachine(vm.Namespace).PatchStatus(context.Background(), vm.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return err
}
vm.Status = patchedVM.Status
return nil
}
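// syncRunStrategy reconciles the VMI's existence with the VM's run strategy:
// Always restarts finished VMIs, RerunOnFailure restarts only failed ones,
// Manual acts solely on start/stop requests, Halted stops everything, Once
// never restarts, and WaitAsReceiver prepares a migration-target VMI.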
func (c *Controller) syncRunStrategy(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance, runStrategy virtv1.VirtualMachineRunStrategy) (*virtv1.VirtualMachine, common.SyncError) {
vmKey, err := controller.KeyFunc(vm)
if err != nil {
log.Log.Object(vm).Errorf(fetchingVMKeyErrFmt, err)
return vm, common.NewSyncError(err, failedCreateReason)
}
log.Log.Object(vm).V(4).Infof("VirtualMachine RunStrategy: %s", runStrategy)
switch runStrategy {
case virtv1.RunStrategyAlways:
// For this RunStrategy, a VMI should always be running. If a StateChangeRequest
// asks to stop a VMI, a new one must be immediately re-started.
if vmi != nil {
var forceRestart bool
if forceRestart = hasStopRequestForVMI(vm, vmi); forceRestart {
log.Log.Object(vm).Infof("processing forced restart request for VMI with phase %s and VM runStrategy: %s", vmi.Status.Phase, runStrategy)
}
if forceRestart || vmi.IsFinal() {
if vmi.IsDecentralizedMigration() {
if vmi.IsMigrationCompleted() {
log.Log.Object(vm).Infof("decentralized migration completed, setting runStrategy to halted")
// decentralized migration completed, mark the VM as halted. In this case the VM is now in a different
// namespace/cluster and we need to stop the VM.
vm.Spec.RunStrategy = pointer.P(virtv1.RunStrategyHalted)
// return here and let the halted runstrategy stop the VMI.
return vm, nil
}
// It is possible that the VMI has not synchronized yet with the completed migration status, but the VMI is
// marked as succeeded. This will normally trigger a restart due to the runStrategy. But we want to wait for
// the migration to complete and then mark the run strategy as halted.
log.Log.Object(vm).V(4).Infof("decentralized migration not completed, adding to queue, waiting 2 seconds")
c.Queue.AddAfter(vmKey, 2*time.Second)
return vm, nil
}
log.Log.Object(vm).Infof("%s with VMI in phase %s and VM runStrategy: %s", stoppingVmMsg, vmi.Status.Phase, runStrategy)
// The VirtualMachineInstance can fail or finish. The job of this controller
// is to keep the VirtualMachineInstance running, therefore it restarts it:
// the VirtualMachineInstance is stopped here and started again in the next step.
log.Log.Object(vm).V(4).Info(stoppingVmMsg)
vm, err = c.stopVMI(vm, vmi)
if err != nil {
log.Log.Object(vm).Errorf(failureDeletingVmiErrFormat, err)
return vm, common.NewSyncError(fmt.Errorf(failureDeletingVmiErrFormat, err), vmiFailedDeleteReason)
}
// return to let the controller pick up the expected deletion
}
log.Log.Object(vm).V(4).Infof("VMI is not final, uid %s, phase %s", vmi.UID, vmi.Status.Phase)
// VirtualMachineInstance is OK no need to do anything
return vm, nil
} else {
log.Log.Object(vm).V(4).Info("VMI is nil, checking if we need to start it")
}
timeLeft := startFailureBackoffTimeLeft(vm)
if timeLeft > 0 {
log.Log.Object(vm).Infof("Delaying start of VM %s with 'runStrategy: %s' due to start failure backoff. Waiting %d more seconds before starting.", startingVmMsg, runStrategy, timeLeft)
c.Queue.AddAfter(vmKey, time.Duration(timeLeft)*time.Second)
return vm, nil
}
log.Log.Object(vm).Infof("%s due to runStrategy: %s", startingVmMsg, runStrategy)
vm, err = c.startVMI(vm)
if err != nil {
return vm, common.NewSyncError(fmt.Errorf(startingVMIFailureFmt, err), failedCreateReason)
}
return vm, nil
case virtv1.RunStrategyRerunOnFailure:
// For this RunStrategy, a VMI should only be restarted if it failed.
// If a VMI enters the Succeeded phase, it should not be restarted.
if vmi != nil {
forceStop := hasStopRequestForVMI(vm, vmi)
if forceStop {
log.Log.Object(vm).Infof("processing stop request for VMI with phase %s and VM runStrategy: %s", vmi.Status.Phase, runStrategy)
}
vmiFailed := vmi.Status.Phase == virtv1.Failed
vmiSucceeded := vmi.Status.Phase == virtv1.Succeeded
if vmi.DeletionTimestamp == nil && (forceStop || vmiFailed || vmiSucceeded) {
// For RerunOnFailure, this controller should only restart the VirtualMachineInstance if it failed.
log.Log.Object(vm).Infof("%s with VMI in phase %s and VM runStrategy: %s", stoppingVmMsg, vmi.Status.Phase, runStrategy)
vm, err = c.stopVMI(vm, vmi)
if err != nil {
log.Log.Object(vm).Errorf(failureDeletingVmiErrFormat, err)
return vm, common.NewSyncError(fmt.Errorf(failureDeletingVmiErrFormat, err), vmiFailedDeleteReason)
}
if vmiFailed {
if err := c.addStartRequest(vm); err != nil {
return vm, common.NewSyncError(fmt.Errorf("failed to patch VM with start action: %v", err), vmiFailedDeleteReason)
}
}
}
// return to let the controller pick up the expected deletion
return vm, nil
}
// when coming here from a different RunStrategy we have to start the VM
if !hasStartRequest(vm) && vm.Status.RunStrategy == runStrategy {
return vm, nil
}
timeLeft := startFailureBackoffTimeLeft(vm)
if timeLeft > 0 {
log.Log.Object(vm).Infof("Delaying start of VM %s with 'runStrategy: %s' due to start failure backoff. Waiting %d more seconds before starting.", startingVmMsg, runStrategy, timeLeft)
c.Queue.AddAfter(vmKey, time.Duration(timeLeft)*time.Second)
return vm, nil
}
log.Log.Object(vm).Infof("%s due to runStrategy: %s", startingVmMsg, runStrategy)
vm, err = c.startVMI(vm)
if err != nil {
return vm, common.NewSyncError(fmt.Errorf(startingVMIFailureFmt, err), failedCreateReason)
}
return vm, nil
case virtv1.RunStrategyManual:
// For this RunStrategy, VMIs will be started/stopped/restarted using api endpoints only
if vmi != nil {
log.Log.Object(vm).V(4).Info("VMI exists")
if forceStop := hasStopRequestForVMI(vm, vmi); forceStop {
log.Log.Object(vm).Infof("%s with VMI in phase %s due to stop request and VM runStrategy: %s", vmi.Status.Phase, stoppingVmMsg, runStrategy)
vm, err = c.stopVMI(vm, vmi)
if err != nil {
log.Log.Object(vm).Errorf(failureDeletingVmiErrFormat, err)
return vm, common.NewSyncError(fmt.Errorf(failureDeletingVmiErrFormat, err), vmiFailedDeleteReason)
}
// return to let the controller pick up the expected deletion
return vm, nil
}
} else {
if hasStartRequest(vm) {
log.Log.Object(vm).Infof("%s due to start request and runStrategy: %s", startingVmMsg, runStrategy)
vm, err = c.startVMI(vm)
if err != nil {
return vm, common.NewSyncError(fmt.Errorf(startingVMIFailureFmt, err), failedCreateReason)
}
}
}
return vm, nil
case virtv1.RunStrategyHalted:
// For this runStrategy, no VMI should be running under any circumstances.
// Set RunStrategyAlways/running = true if the VM has a StartRequest (start paused case).
if vmi == nil {
if hasStartRequest(vm) {
vmCopy := vm.DeepCopy()
runStrategy := virtv1.RunStrategyAlways
running := true
if vmCopy.Spec.RunStrategy != nil {
vmCopy.Spec.RunStrategy = &runStrategy
} else {
vmCopy.Spec.Running = &running
}
_, err := c.clientset.VirtualMachine(vmCopy.Namespace).Update(context.Background(), vmCopy, metav1.UpdateOptions{})
return vm, common.NewSyncError(fmt.Errorf(startingVMIFailureFmt, err), failedCreateReason)
}
return vm, nil
}
log.Log.Object(vm).Infof("%s with VMI in phase %s due to runStrategy: %s", stoppingVmMsg, vmi.Status.Phase, runStrategy)
vm, err = c.stopVMI(vm, vmi)
if err != nil {
return vm, common.NewSyncError(fmt.Errorf(failureDeletingVmiErrFormat, err), vmiFailedDeleteReason)
}
return vm, nil
case virtv1.RunStrategyOnce:
if vmi == nil {
log.Log.Object(vm).Infof("%s due to start request and runStrategy: %s", startingVmMsg, runStrategy)
vm, err = c.startVMI(vm)
if err != nil {
return vm, common.NewSyncError(fmt.Errorf(startingVMIFailureFmt, err), failedCreateReason)
}
}
return vm, nil
case virtv1.RunStrategyWaitAsReceiver:
// Create a VMI in receiver mode, this prevents someone from accidentally starting the VM.
if vmi != nil {
// Check if this is a receiver VMI
if val, ok := vmi.Annotations[virtv1.CreateMigrationTarget]; !ok || val != "true" {
if vmi.Status.MigrationState != nil && vmi.Status.MigrationState.Completed {
log.Log.Object(vm).V(4).Infof("VMI %s/%s is a receiver VMI and has completed migration", vmi.Namespace, vmi.Name)
// Restore the original run strategy
if val, ok := vm.Annotations[virtv1.RestoreRunStrategy]; ok {
vm.Spec.RunStrategy = pointer.P(virtv1.VirtualMachineRunStrategy(val))
}
return vm, nil
}
return vm, common.NewSyncError(fmt.Errorf(nonReceiverVMI), failedCreateReason)
}
} else {
log.Log.Object(vm).Infof("%s due to runStrategy: %s", startingVmReceiverMsg, runStrategy)
vm, err = c.startVMI(vm)
if err != nil {
return vm, common.NewSyncError(fmt.Errorf(startingVMIFailureFmt, err), failedCreateReason)
}
}
return vm, nil
default:
return vm, common.NewSyncError(fmt.Errorf("unknown runstrategy: %s", runStrategy), failedCreateReason)
}
}
// isVMIStartExpected determines whether a VMI is expected to be started for this VM.
func (c *Controller) isVMIStartExpected(vm *virtv1.VirtualMachine) bool {
vmKey, err := controller.KeyFunc(vm)
if err != nil {
log.Log.Object(vm).Errorf(fetchingVMKeyErrFmt, err)
return false
}
expectations, exists, _ := c.expectations.GetExpectations(vmKey)
if !exists || expectations == nil {
return false
}
adds, _ := expectations.GetExpectations()
return adds > 0
}
// isVMIStopExpected determines whether a VMI is expected to be stopped for this VM.
func (c *Controller) isVMIStopExpected(vm *virtv1.VirtualMachine) bool {
vmKey, err := controller.KeyFunc(vm)
if err != nil {
log.Log.Object(vm).Errorf(fetchingVMKeyErrFmt, err)
return false
}
expectations, exists, _ := c.expectations.GetExpectations(vmKey)
if !exists || expectations == nil {
return false
}
_, dels := expectations.GetExpectations()
return dels > 0
}
// isSetToStart determines whether a VM is configured to be started (running).
func isSetToStart(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
runStrategy, err := vm.RunStrategy()
if err != nil {
log.Log.Object(vm).Errorf(fetchingRunStrategyErrFmt, err)
return false
}
switch runStrategy {
case virtv1.RunStrategyAlways:
return true
case virtv1.RunStrategyHalted:
return false
case virtv1.RunStrategyManual:
if vmi != nil {
return !hasStopRequestForVMI(vm, vmi)
}
return hasStartRequest(vm)
case virtv1.RunStrategyRerunOnFailure:
if vmi != nil {
return vmi.Status.Phase != virtv1.Succeeded
}
return true
case virtv1.RunStrategyOnce:
if vmi == nil {
return true
}
return false
default:
// Shouldn't ever be here, but...
return false
}
}
func (c *Controller) cleanupRestartRequired(vm *virtv1.VirtualMachine) *virtv1.VirtualMachine {
vmConditionManager := controller.NewVirtualMachineConditionManager()
if vmConditionManager.HasCondition(vm, virtv1.VirtualMachineRestartRequired) {
vmConditionManager.RemoveCondition(vm, virtv1.VirtualMachineRestartRequired)
}
return vm
}
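// startVMI creates a new VMI for the VM: it waits for DataVolumes to be
// created, snapshots the VM spec in a ControllerRevision, builds the VMI from
// the template (generation annotation, controller finalizer, instancetype and
// preferences applied), validates the network configuration, and finally
// posts the VMI while tracking a creation expectation.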
func (c *Controller) startVMI(vm *virtv1.VirtualMachine) (*virtv1.VirtualMachine, error) {
ready, err := c.handleDataVolumes(vm)
if err != nil {
return vm, err
}
if !ready {
log.Log.Object(vm).V(4).Info("Waiting for DataVolumes to be created, delaying start")
return vm, nil
}
if controller.NewVirtualMachineConditionManager().HasConditionWithStatus(vm, virtv1.VirtualMachineManualRecoveryRequired, k8score.ConditionTrue) {
log.Log.Object(vm).Error(failedManualRecoveryRequiredCondSetErrMsg)
return vm, nil
}
// TODO add check for existence
vmKey, err := controller.KeyFunc(vm)
if err != nil {
log.Log.Object(vm).Reason(err).Error(failedExtractVmkeyFromVmErrMsg)
return vm, nil
}
vm = c.cleanupRestartRequired(vm)
// start it
vmi := SetupVMIFromVM(vm)
vmRevisionName, err := c.createVMRevision(vm)
if err != nil {
log.Log.Object(vm).Reason(err).Error(failedCreateCRforVmErrMsg)
return vm, err
}
vmi.Status.VirtualMachineRevisionName = vmRevisionName
setGenerationAnnotationOnVmi(vm.Generation, vmi)
if vm.Spec.RunStrategy != nil && *vm.Spec.RunStrategy == virtv1.RunStrategyWaitAsReceiver {
log.Log.Infof("Setting up receiver VMI %s/%s", vmi.Namespace, vmi.Name)
vmi.Annotations[virtv1.CreateMigrationTarget] = "true"
}
// add a finalizer to ensure the VM controller has a chance to see
// the VMI before it is deleted
vmi.Finalizers = append(vmi.Finalizers, virtv1.VirtualMachineControllerFinalizer)
// We need to apply auto attach preferences before any new network or input devices are added.
if err := c.instancetypeController.ApplyAutoAttachPreferences(vm, vmi); err != nil {
log.Log.Object(vm).Infof("Failed to apply device preferences again to VirtualMachineInstance: %s/%s", vmi.Namespace, vmi.Name)
c.recorder.Eventf(vm, k8score.EventTypeWarning, common.FailedCreateVirtualMachineReason, "Error applying device preferences again: %v", err)
return vm, err
}
cbt.SetChangedBlockTrackingOnVMI(vm, vmi, c.clusterConfig, c.namespaceStore)
AutoAttachInputDevice(vmi)
err = netvmispec.SetDefaultNetworkInterface(c.clusterConfig, &vmi.Spec)
if err != nil {
return vm, err
}
if err = c.instancetypeController.ApplyToVMI(vm, vmi); err != nil {
log.Log.Object(vm).Infof("Failed to apply instancetype to VirtualMachineInstance: %s/%s", vmi.Namespace, vmi.Name)
c.recorder.Eventf(vm, k8score.EventTypeWarning, common.FailedCreateVirtualMachineReason, "Error creating virtual machine instance: Failed to apply instancetype: %v", err)
return vm, err
}
netValidator := netadmitter.NewValidator(k8sfield.NewPath("spec"), &vmi.Spec, c.clusterConfig)
var validateErrors []error
for _, cause := range netValidator.ValidateCreation() {
validateErrors = append(validateErrors, errors.New(cause.String()))
}
if validateErr := errors.Join(validateErrors...); validateErr != nil {
return vm, fmt.Errorf("failed create validation: %v", validateErr)
}
c.expectations.ExpectCreations(vmKey, 1)
vmi, err = c.clientset.VirtualMachineInstance(vm.ObjectMeta.Namespace).Create(context.Background(), vmi, metav1.CreateOptions{})
if err != nil {
log.Log.Object(vm).Infof("Failed to create VirtualMachineInstance: %s", controller.NamespacedKey(vmi.Namespace, vmi.Name))
c.expectations.CreationObserved(vmKey)
c.recorder.Eventf(vm, k8score.EventTypeWarning, common.FailedCreateVirtualMachineReason, "Error creating virtual machine instance: %v", err)
return vm, err
}
log.Log.Object(vm).Infof("Started VM by creating the new virtual machine instance %s", vmi.Name)
c.recorder.Eventf(vm, k8score.EventTypeNormal, common.SuccessfulCreateVirtualMachineReason, "Started the virtual machine by creating the new virtual machine instance %v", vmi.ObjectMeta.Name)
return vm, nil
}
func setGenerationAnnotation(generation int64, annotations map[string]string) {
annotations[virtv1.VirtualMachineGenerationAnnotation] = strconv.FormatInt(generation, 10)
}
func setGenerationAnnotationOnVmi(generation int64, vmi *virtv1.VirtualMachineInstance) {
annotations := vmi.GetAnnotations()
if annotations == nil {
annotations = make(map[string]string)
}
setGenerationAnnotation(generation, annotations)
vmi.SetAnnotations(annotations)
}
func (c *Controller) patchVmGenerationAnnotationOnVmi(generation int64, vmi *virtv1.VirtualMachineInstance) (*virtv1.VirtualMachineInstance, error) {
oldAnnotations := vmi.Annotations
newAnnotations := map[string]string{}
maps.Copy(newAnnotations, oldAnnotations)
setGenerationAnnotation(generation, newAnnotations)
patchBytes, err := patch.New(
patch.WithTest("/metadata/annotations", oldAnnotations),
patch.WithReplace("/metadata/annotations", newAnnotations)).GeneratePayload()
if err != nil {
return vmi, err
}
patchedVMI, err := c.clientset.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return vmi, err
}
return patchedVMI, nil
}
// getGenerationAnnotation will return the generation annotation on the
// vmi as a string pointer. The string pointer will be nil if the annotation is
// not found.
func getGenerationAnnotation(vmi *virtv1.VirtualMachineInstance) (i *string, err error) {
if vmi == nil {
return nil, errors.New("received nil pointer for vmi")
}
currentGenerationAnnotation, found := vmi.Annotations[virtv1.VirtualMachineGenerationAnnotation]
if found {
return &currentGenerationAnnotation, nil
}
return nil, nil
}
// getGenerationAnnotationAsInt will return the generation annotation on the
// vmi as an int64 pointer. The int64 pointer will be nil if the annotation is
// not found.
func getGenerationAnnotationAsInt(vmi *virtv1.VirtualMachineInstance, logger *log.FilteredLogger) (i *int64, err error) {
if vmi == nil {
return nil, errors.New("received nil pointer for vmi")
}
currentGenerationAnnotation, found := vmi.Annotations[virtv1.VirtualMachineGenerationAnnotation]
if found {
i, err := strconv.ParseInt(currentGenerationAnnotation, 10, 64)
if err != nil {
// If there is an error during parsing, it will be treated as if the
// annotation does not exist since the annotation is not formatted
// correctly. Further iterations / logic in the controller will handle
// re-annotating this by the controller revision. Still log the error for
// debugging, since there should never be a ParseInt error during normal
// use.
logger.Reason(err).Errorf("Failed to parse virtv1.VirtualMachineGenerationAnnotation as an int from vmi %v annotations", vmi.Name)
return nil, nil
}
return &i, nil
}
return nil, nil
}
// Follows the template used in createVMRevision for the Data.Raw value
type VirtualMachineRevisionData struct {
Spec virtv1.VirtualMachineSpec `json:"spec"`
}
// conditionallyBumpGenerationAnnotationOnVmi will check whether the
// generation annotation needs to be bumped on the VMI, and then bump that
// annotation if needed. The checks are:
// 1. If the generation has not changed, do not bump.
// 2. Only bump if the templates are the same.
//
// Note that if only the Run Strategy of the VM has changed, the generation
// annotation will still be bumped, since this does not affect the VMI.
func (c *Controller) conditionallyBumpGenerationAnnotationOnVmi(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) (*virtv1.VirtualMachineInstance, error) {
if vmi == nil || vm == nil {
return vmi, nil
}
// If this is an old vmi created before a controller update, then the
// annotation may not exist. In that case, continue on as if the generation
// annotation needs to be bumped.
currentGeneration, err := getGenerationAnnotation(vmi)
if err != nil {
return vmi, err
}
if currentGeneration != nil && *currentGeneration == strconv.FormatInt(vm.Generation, 10) {
return vmi, nil
}
currentRevision, err := c.getControllerRevision(vmi.Namespace, vmi.Status.VirtualMachineRevisionName)
if currentRevision == nil || err != nil {
return vmi, err
}
revisionSpec := &VirtualMachineRevisionData{}
if err = json.Unmarshal(currentRevision.Data.Raw, revisionSpec); err != nil {
return vmi, err
}
// If the templates are the same, we can safely bump the annotation.
if equality.Semantic.DeepEqual(revisionSpec.Spec.Template, vm.Spec.Template) {
patchedVMI, err := c.patchVmGenerationAnnotationOnVmi(vm.Generation, vmi)
if err != nil {
return vmi, err
}
vmi = patchedVMI
}
return vmi, nil
}
// Returns in seconds how long to wait before trying to start the VM again.
func calculateStartBackoffTime(failCount int, maxDelay int) int {
// The algorithm is designed to work well with a dynamic maxDelay
// if we decide to expose this as a tuning in the future.
minInterval := 10
delaySeconds := 0
if failCount <= 0 {
failCount = 1
}
multiplier := int(math.Pow(float64(failCount), float64(2)))
interval := maxDelay / 30
if interval < minInterval {
interval = minInterval
}
delaySeconds = interval * multiplier
randomRange := (delaySeconds / 2) + 1
// add randomized seconds to offset multiple failing VMs from one another
delaySeconds += rand.Intn(randomRange)
if delaySeconds > maxDelay {
delaySeconds = maxDelay
}
return delaySeconds
}
// Reports if vmi has ever hit a running state
func wasVMIInRunningPhase(vmi *virtv1.VirtualMachineInstance) bool {
if vmi == nil {
return false
}
for _, ts := range vmi.Status.PhaseTransitionTimestamps {
if ts.Phase == virtv1.Running {
return true
}
}
return false
}
// Reports if vmi failed before ever hitting a running state
func vmiFailedEarly(vmi *virtv1.VirtualMachineInstance) bool {
if vmi == nil || !vmi.IsFinal() {
return false
}
if wasVMIInRunningPhase(vmi) {
return false
}
return true
}
// clear start failure tracking if...
// 1. VMI exists and ever hit running phase
// 2. run strategy is not set to automatically restart failed VMIs
func shouldClearStartFailure(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
if wasVMIInRunningPhase(vmi) {
return true
}
runStrategy, err := vm.RunStrategy()
if err != nil {
log.Log.Object(vm).Errorf(fetchingRunStrategyErrFmt, err)
return false
}
if runStrategy != virtv1.RunStrategyAlways &&
runStrategy != virtv1.RunStrategyRerunOnFailure &&
runStrategy != virtv1.RunStrategyOnce {
return true
}
return false
}
func startFailureBackoffTimeLeft(vm *virtv1.VirtualMachine) int64 {
if vm.Status.StartFailure == nil {
return 0
}
now := time.Now().UTC().Unix()
retryAfter := vm.Status.StartFailure.RetryAfterTimestamp.Time.UTC().Unix()
diff := retryAfter - now
if diff > 0 {
return diff
}
return 0
}
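// syncStartFailureStatus maintains crash-loop back-off tracking: it clears
// StartFailure once a VMI has been running (or the run strategy does not
// auto-restart), and otherwise bumps ConsecutiveFailCount and computes a new
// RetryAfterTimestamp when a VMI failed before ever reaching Running.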
func syncStartFailureStatus(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) {
if shouldClearStartFailure(vm, vmi) {
// if a vmi associated with the vm hits a running phase, then reset the start failure counter
vm.Status.StartFailure = nil
} else if vmi != nil && vmiFailedEarly(vmi) {
// if the VMI failed without ever hitting running successfully,
// record this as a start failure so we can back off retrying
if vm.Status.StartFailure != nil && vm.Status.StartFailure.LastFailedVMIUID == vmi.UID {
// already counted this failure
return
}
count := 1
if vm.Status.StartFailure != nil {
count = vm.Status.StartFailure.ConsecutiveFailCount + 1
}
now := metav1.NewTime(time.Now())
delaySeconds := calculateStartBackoffTime(count, defaultMaxCrashLoopBackoffDelaySeconds)
retryAfter := metav1.NewTime(now.Time.Add(time.Duration(int64(delaySeconds)) * time.Second))
vm.Status.StartFailure = &virtv1.VirtualMachineStartFailure{
LastFailedVMIUID: vmi.UID,
RetryAfterTimestamp: &retryAfter,
ConsecutiveFailCount: count,
}
}
}
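// syncVolumeMigration cleans up or escalates an in-flight volume migration:
// the recorded VolumeMigrationState is dropped once the original volumes are
// restored or the strategy is no longer migration, a ManualRecoveryRequired
// condition is set when the VMI vanished or finished mid-migration, and a
// cancelled migration clears the state.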
func syncVolumeMigration(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) {
if vm.Status.VolumeUpdateState == nil || vm.Status.VolumeUpdateState.VolumeMigrationState == nil {
return
}
vmCond := controller.NewVirtualMachineConditionManager()
vmiCond := controller.NewVirtualMachineInstanceConditionManager()
// Check if the volumes have been recovered and point to the original ones
srcMigVols := make(map[string]string)
for _, v := range vm.Status.VolumeUpdateState.VolumeMigrationState.MigratedVolumes {
if v.SourcePVCInfo != nil {
srcMigVols[v.VolumeName] = v.SourcePVCInfo.ClaimName
}
}
recoveredOldVMVolumes := true
for _, v := range vm.Spec.Template.Spec.Volumes {
name := storagetypes.PVCNameFromVirtVolume(&v)
origName, ok := srcMigVols[v.Name]
if !ok {
continue
}
if origName != name {
recoveredOldVMVolumes = false
}
}
if recoveredOldVMVolumes || (vm.Spec.UpdateVolumesStrategy == nil || *vm.Spec.UpdateVolumesStrategy != virtv1.UpdateVolumesStrategyMigration) {
vm.Status.VolumeUpdateState.VolumeMigrationState = nil
// Clean up the volume change and manual recovery conditions once the volume set has been restored
vmCond.RemoveCondition(vm, virtv1.VirtualMachineConditionType(virtv1.VirtualMachineInstanceVolumesChange))
vmCond.RemoveCondition(vm, virtv1.VirtualMachineManualRecoveryRequired)
return
}
if vmi == nil || vmi.IsFinal() {
if vmCond.HasConditionWithStatus(vm, virtv1.VirtualMachineConditionType(virtv1.VirtualMachineInstanceVolumesChange), k8score.ConditionTrue) {
// Something went wrong with the VMI while the volume migration was in progress
vmCond.UpdateCondition(vm, &virtv1.VirtualMachineCondition{
Type: virtv1.VirtualMachineManualRecoveryRequired,
Status: k8score.ConditionTrue,
Reason: "VMI was removed or was final during the volume migration",
})
}
return
}
// The volume migration has been cancelled
if cond := vmiCond.GetCondition(vmi, virtv1.VirtualMachineInstanceVolumesChange); cond != nil &&
cond.Status == k8score.ConditionFalse &&
cond.Reason == virtv1.VirtualMachineInstanceReasonVolumesChangeCancellation {
vm.Status.VolumeUpdateState.VolumeMigrationState = nil
}
}
// stopVMI stops a running VMI by deleting it and recording a deletion
// expectation, so the controller can observe the delete before reconciling again.
func (c *Controller) stopVMI(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) (*virtv1.VirtualMachine, error) {
if vmi == nil || vmi.DeletionTimestamp != nil {
// nothing to do
return vm, nil
}
vmKey, err := controller.KeyFunc(vm)
if err != nil {
log.Log.Object(vm).Reason(err).Error(failedExtractVmkeyFromVmErrMsg)
return vm, nil
}
// stop it
c.expectations.ExpectDeletions(vmKey, []string{controller.VirtualMachineInstanceKey(vmi)})
err = c.clientset.VirtualMachineInstance(vm.ObjectMeta.Namespace).Delete(context.Background(), vmi.ObjectMeta.Name, metav1.DeleteOptions{})
// Don't log an error if it is already deleted
if err != nil {
// We can't observe a delete if it was not accepted by the server
c.expectations.DeletionObserved(vmKey, controller.VirtualMachineInstanceKey(vmi))
c.recorder.Eventf(vm, k8score.EventTypeWarning, common.FailedDeleteVirtualMachineReason, "Error deleting virtual machine instance %s: %v", vmi.ObjectMeta.Name, err)
return vm, err
}
vm = c.cleanupRestartRequired(vm)
c.recorder.Eventf(vm, k8score.EventTypeNormal, common.SuccessfulDeleteVirtualMachineReason, "Stopped the virtual machine by deleting the virtual machine instance %v", vmi.ObjectMeta.UID)
log.Log.Object(vm).Infof("Dispatching delete event for vmi %s with phase %s", controller.NamespacedKey(vmi.Namespace, vmi.Name), vmi.Status.Phase)
return vm, nil
}
func popStateChangeRequest(vm *virtv1.VirtualMachine) {
vm.Status.StateChangeRequests = vm.Status.StateChangeRequests[1:]
}
func vmRevisionName(vmUID types.UID) string {
return fmt.Sprintf("revision-start-vm-%s", vmUID)
}
func getVMRevisionName(vmUID types.UID, generation int64) string {
return fmt.Sprintf("%s-%d", vmRevisionName(vmUID), generation)
}
func patchVMRevision(vm *virtv1.VirtualMachine) ([]byte, error) {
vmCopy := vm.DeepCopy()
if vmCopy.Spec.Instancetype != nil && revision.HasControllerRevisionRef(vmCopy.Status.InstancetypeRef) {
vmCopy.Spec.Instancetype.RevisionName = vmCopy.Status.InstancetypeRef.ControllerRevisionRef.Name
}
if vmCopy.Spec.Preference != nil && revision.HasControllerRevisionRef(vm.Status.PreferenceRef) {
vmCopy.Spec.Preference.RevisionName = vm.Status.PreferenceRef.ControllerRevisionRef.Name
}
vmBytes, err := json.Marshal(vmCopy)
if err != nil {
return nil, err
}
var raw map[string]interface{}
err = json.Unmarshal(vmBytes, &raw)
if err != nil {
return nil, err
}
objCopy := make(map[string]interface{})
spec := raw["spec"].(map[string]interface{})
objCopy["spec"] = spec
patch, err := json.Marshal(objCopy)
return patch, err
}
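// deleteOlderVMRevision removes ControllerRevisions of this VM that belong to
// older generations and reports whether a revision for the current generation
// already exists (in which case no new revision needs to be created).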
func (c *Controller) deleteOlderVMRevision(vm *virtv1.VirtualMachine) (bool, error) {
keys, err := c.crIndexer.IndexKeys("vm", string(vm.UID))
if err != nil {
return false, err
}
createNotNeeded := false
for _, key := range keys {
if !strings.Contains(key, vmRevisionName(vm.UID)) {
continue
}
storeObj, exists, err := c.crIndexer.GetByKey(key)
if !exists || err != nil {
return false, err
}
cr, ok := storeObj.(*appsv1.ControllerRevision)
if !ok {
return false, fmt.Errorf("unexpected resource %+v", storeObj)
}
if cr.Revision == vm.ObjectMeta.Generation {
createNotNeeded = true
continue
}
err = c.clientset.AppsV1().ControllerRevisions(vm.Namespace).Delete(context.Background(), cr.Name, metav1.DeleteOptions{})
if err != nil {
return false, err
}
}
return createNotNeeded, nil
}
// getControllerRevision attempts to get the controller revision by name and
// namespace. It will return (nil, nil) if the controller revision is not found.
func (c *Controller) getControllerRevision(namespace string, name string) (*appsv1.ControllerRevision, error) {
cr, err := c.clientset.AppsV1().ControllerRevisions(namespace).Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
if apiErrors.IsNotFound(err) {
return nil, nil
}
return nil, err
}
return cr, nil
}
func (c *Controller) getVMSpecForKey(key string) (*virtv1.VirtualMachineSpec, error) {
obj, exists, err := c.crIndexer.GetByKey(key)
if err != nil {
return nil, err
}
if !exists {
return nil, fmt.Errorf("could not find key %s", key)
}
cr, ok := obj.(*appsv1.ControllerRevision)
if !ok {
return nil, fmt.Errorf("unexpected resource %+v", obj)
}
revisionData := VirtualMachineRevisionData{}
err = json.Unmarshal(cr.Data.Raw, &revisionData)
if err != nil {
return nil, err
}
return &revisionData.Spec, nil
}
func genFromKey(key string) (int64, error) {
items := strings.Split(key, "-")
genString := items[len(items)-1]
return strconv.ParseInt(genString, 10, 64)
}
func (c *Controller) getLastVMRevisionSpec(vm *virtv1.VirtualMachine) (*virtv1.VirtualMachineSpec, error) {
keys, err := c.crIndexer.IndexKeys("vm", string(vm.UID))
if err != nil {
return nil, err
}
if len(keys) == 0 {
return nil, nil
}
var highestGen int64 = 0
var key string
for _, k := range keys {
if !strings.Contains(k, vmRevisionName(vm.UID)) {
continue
}
gen, err := genFromKey(k)
if err != nil {
return nil, fmt.Errorf("invalid key: %s", k)
}
if gen > highestGen {
if key != "" {
log.Log.Object(vm).Warningf("expected no more than 1 revision, found at least 2")
}
highestGen = gen
key = k
}
}
if key == "" {
return nil, nil
}
return c.getVMSpecForKey(key)
}
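// createVMRevision persists the current VM spec as a ControllerRevision named
// after the VM's UID and generation, pruning revisions from older generations
// first and skipping creation when the current generation is already stored.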
func (c *Controller) createVMRevision(vm *virtv1.VirtualMachine) (string, error) {
vmRevisionName := getVMRevisionName(vm.UID, vm.Generation)
createNotNeeded, err := c.deleteOlderVMRevision(vm)
if err != nil || createNotNeeded {
return vmRevisionName, err
}
patch, err := patchVMRevision(vm)
if err != nil {
return "", err
}
cr := &appsv1.ControllerRevision{
ObjectMeta: metav1.ObjectMeta{
Name: vmRevisionName,
Namespace: vm.Namespace,
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(vm, virtv1.VirtualMachineGroupVersionKind)},
},
Data: runtime.RawExtension{Raw: patch},
Revision: vm.ObjectMeta.Generation,
}
_, err = c.clientset.AppsV1().ControllerRevisions(vm.Namespace).Create(context.Background(), cr, metav1.CreateOptions{})
if err != nil {
return "", err
}
return cr.Name, nil
}
// SetupVMIFromVM creates a VirtualMachineInstance object from one VirtualMachine object.
func SetupVMIFromVM(vm *virtv1.VirtualMachine) *virtv1.VirtualMachineInstance {
vmi := libvmi.New()
vmi.ObjectMeta = *vm.Spec.Template.ObjectMeta.DeepCopy()
vmi.ObjectMeta.Name = vm.ObjectMeta.Name
vmi.ObjectMeta.GenerateName = ""
vmi.ObjectMeta.Namespace = vm.ObjectMeta.Namespace
vmi.Spec = *vm.Spec.Template.Spec.DeepCopy()
if hasStartPausedRequest(vm) {
strategy := virtv1.StartStrategyPaused
vmi.Spec.StartStrategy = &strategy
}
// prevent retriggering a memory dump after shutdown if the memory dump has completed
if memorydump.HasCompleted(vm) {
vmi.Spec = *memorydump.RemoveMemoryDumpVolumeFromVMISpec(&vmi.Spec, vm.Status.MemoryDumpRequest.ClaimName)
}
setupStableFirmwareUUID(vm, vmi)
// TODO check if vmi labels exist, and when make sure that they match. For now just override them
vmi.ObjectMeta.Labels = vm.Spec.Template.ObjectMeta.Labels
vmi.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
*metav1.NewControllerRef(vm, virtv1.VirtualMachineGroupVersionKind),
}
util.SetDefaultVolumeDisk(&vmi.Spec)
return vmi
}
func hasStartPausedRequest(vm *virtv1.VirtualMachine) bool {
if len(vm.Status.StateChangeRequests) == 0 {
return false
}
stateChange := vm.Status.StateChangeRequests[0]
pausedValue, hasPaused := stateChange.Data[virtv1.StartRequestDataPausedKey]
return stateChange.Action == virtv1.StartRequest &&
hasPaused &&
pausedValue == virtv1.StartRequestDataPausedTrue
}
func hasStartRequest(vm *virtv1.VirtualMachine) bool {
if len(vm.Status.StateChangeRequests) == 0 {
return false
}
stateChange := vm.Status.StateChangeRequests[0]
return stateChange.Action == virtv1.StartRequest
}
func hasStopRequestForVMI(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
if len(vm.Status.StateChangeRequests) == 0 {
return false
}
stateChange := vm.Status.StateChangeRequests[0]
return stateChange.Action == virtv1.StopRequest &&
stateChange.UID != nil &&
*stateChange.UID == vmi.UID
}
// setupStableFirmwareUUID makes sure the VirtualMachineInstance being started has a 'stable' UUID.
// The UUID is 'stable' if it doesn't change across reboots.
func setupStableFirmwareUUID(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) {
logger := log.Log.Object(vm)
if vmi.Spec.Domain.Firmware == nil {
vmi.Spec.Domain.Firmware = &virtv1.Firmware{}
}
existingUUID := vmi.Spec.Domain.Firmware.UUID
if existingUUID != "" {
logger.V(4).Infof("Using existing UUID '%s'", existingUUID)
return
}
vmi.Spec.Domain.Firmware.UUID = CalculateLegacyUUID(vmi.Name)
}
// listControllerFromNamespace takes a namespace and returns all VirtualMachines
// from the VirtualMachine cache which run in this namespace
func (c *Controller) listControllerFromNamespace(namespace string) ([]*virtv1.VirtualMachine, error) {
objs, err := c.vmIndexer.ByIndex(cache.NamespaceIndex, namespace)
if err != nil {
return nil, err
}
var vms []*virtv1.VirtualMachine
for _, obj := range objs {
vm := obj.(*virtv1.VirtualMachine)
vms = append(vms, vm)
}
return vms, nil
}
// getMatchingControllers returns the list of VirtualMachines which matches
// the labels of the VirtualMachineInstance from the listener cache. If there are no matching
// controllers nothing is returned
func (c *Controller) getMatchingControllers(vmi *virtv1.VirtualMachineInstance) (vms []*virtv1.VirtualMachine) {
controllers, err := c.listControllerFromNamespace(vmi.ObjectMeta.Namespace)
if err != nil {
return nil
}
// TODO check owner reference, if we have an existing controller which owns this one
for _, vm := range controllers {
if vmi.Name == vm.Name {
vms = append(vms, vm)
}
}
return vms
}
// When a vmi is created, enqueue the VirtualMachine that manages it and update its expectations.
func (c *Controller) addVirtualMachineInstance(obj interface{}) {
vmi := obj.(*virtv1.VirtualMachineInstance)
log.Log.Object(vmi).V(4).Info("VirtualMachineInstance added.")
if vmi.DeletionTimestamp != nil {
// on a restart of the controller manager, it's possible a new vmi shows up in a state that
// is already pending deletion. Prevent the vmi from being a creation observation.
c.deleteVirtualMachineInstance(vmi)
return
}
// If it has a ControllerRef, that's all that matters.
if controllerRef := metav1.GetControllerOf(vmi); controllerRef != nil {
log.Log.Object(vmi).V(4).Info("Looking for VirtualMachineInstance Ref")
vm := c.resolveControllerRef(vmi.Namespace, controllerRef)
if vm == nil {
// not managed by us
log.Log.Object(vmi).V(4).Infof("Cant find the matching VM for VirtualMachineInstance: %s", vmi.Name)
return
}
vmKey, err := controller.KeyFunc(vm)
if err != nil {
log.Log.Object(vmi).Errorf("Cannot parse key of VM: %s for VirtualMachineInstance: %s", vm.Name, vmi.Name)
return
}
log.Log.Object(vmi).V(4).Infof("VirtualMachineInstance created because %s was added.", vmi.Name)
c.expectations.CreationObserved(vmKey)
c.enqueueVm(vm)
return
}
// Otherwise, it's an orphan. Get a list of all matching VirtualMachines and sync
// them to see if anyone wants to adopt it.
// DO NOT observe creation because no controller should be waiting for an
// orphan.
vms := c.getMatchingControllers(vmi)
if len(vms) == 0 {
return
}
log.Log.V(4).Object(vmi).Infof("Orphan VirtualMachineInstance created")
for _, vm := range vms {
c.enqueueVm(vm)
}
}
// When a vmi is updated, figure out which VirtualMachine manages it and wake it
// up. If the labels of the vmi have changed we need to awaken both the old
// and new VirtualMachine. old and cur must be *v1.VirtualMachineInstance types.
func (c *Controller) updateVirtualMachineInstance(old, cur interface{}) {
curVMI := cur.(*virtv1.VirtualMachineInstance)
oldVMI := old.(*virtv1.VirtualMachineInstance)
if curVMI.ResourceVersion == oldVMI.ResourceVersion {
// Periodic resync will send update events for all known vmis.
// Two different versions of the same vmi will always have different RVs.
return
}
labelChanged := !equality.Semantic.DeepEqual(curVMI.Labels, oldVMI.Labels)
if curVMI.DeletionTimestamp != nil {
// when a vmi is deleted gracefully, its deletion timestamp is first modified to reflect a grace period,
// and after such time has passed, the virt-handler actually deletes it from the store. We receive an update
// for the modification of the deletion timestamp and expect a VirtualMachine to create a new VMI asap, not wait
// until the virt-handler actually deletes the vmi. This is different from the Phase of a vmi changing, because
// a VirtualMachine never initiates a phase change, and so is never asleep waiting for the same.
c.deleteVirtualMachineInstance(curVMI)
if labelChanged {
// we don't need to check the oldVMI.DeletionTimestamp because DeletionTimestamp cannot be unset.
c.deleteVirtualMachineInstance(oldVMI)
}
return
}
curControllerRef := metav1.GetControllerOf(curVMI)
oldControllerRef := metav1.GetControllerOf(oldVMI)
controllerRefChanged := !equality.Semantic.DeepEqual(curControllerRef, oldControllerRef)
if controllerRefChanged && oldControllerRef != nil {
// The ControllerRef was changed. Sync the old controller, if any.
if vm := c.resolveControllerRef(oldVMI.Namespace, oldControllerRef); vm != nil {
c.enqueueVm(vm)
}
}
// If it has a ControllerRef, that's all that matters.
if curControllerRef != nil {
vm := c.resolveControllerRef(curVMI.Namespace, curControllerRef)
if vm == nil {
return
}
log.Log.V(4).Object(curVMI).Infof("VirtualMachineInstance updated")
c.enqueueVm(vm)
// TODO: MinReadySeconds in the VirtualMachineInstance will generate an Available condition to be added in
// Update once we support the available concept on the VirtualMachine
return
}
isOrphan := !labelChanged && !controllerRefChanged
if isOrphan {
return
}
// If anything changed, sync matching controllers to see if anyone wants to adopt it now.
vms := c.getMatchingControllers(curVMI)
if len(vms) == 0 {
return
}
log.Log.V(4).Object(curVMI).Infof("Orphan VirtualMachineInstance updated")
for _, vm := range vms {
c.enqueueVm(vm)
}
}
// When a vmi is deleted, enqueue the VirtualMachine that manages the vmi and update its expectations.
// obj could be a *v1.VirtualMachineInstance, or a DeletedFinalStateUnknown marker item.
func (c *Controller) deleteVirtualMachineInstance(obj interface{}) {
vmi, ok := obj.(*virtv1.VirtualMachineInstance)
// When a delete is dropped, the relist will notice a vmi in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the vmi
// changed labels the new VirtualMachine will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Log.Reason(fmt.Errorf("couldn't get object from tombstone %+v", obj)).Error(failedProcessDeleteNotificationErrMsg)
return
}
vmi, ok = tombstone.Obj.(*virtv1.VirtualMachineInstance)
if !ok {
log.Log.Reason(fmt.Errorf("tombstone contained object that is not a vmi %#v", obj)).Error(failedProcessDeleteNotificationErrMsg)
return
}
}
controllerRef := metav1.GetControllerOf(vmi)
if controllerRef == nil {
// No controller should care about orphans being deleted.
return
}
vm := c.resolveControllerRef(vmi.Namespace, controllerRef)
if vm == nil {
return
}
vmKey, err := controller.KeyFunc(vm)
if err != nil {
return
}
c.expectations.DeletionObserved(vmKey, controller.VirtualMachineInstanceKey(vmi))
c.enqueueVm(vm)
}
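// listVMsMatchingPVC returns deep copies of all VirtualMachines that reference the given PVC,
// either directly or through a DataVolume, using the "dv" and "pvc" indexes of the VM indexer.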
func (c *Controller) listVMsMatchingPVC(namespace, pvcName string) ([]*virtv1.VirtualMachine, error) {
vms := []*virtv1.VirtualMachine{}
for _, indexName := range []string{"dv", "pvc"} {
objs, err := c.vmIndexer.ByIndex(indexName, namespace+"/"+pvcName)
if err != nil {
return nil, err
}
for _, obj := range objs {
vm := obj.(*virtv1.VirtualMachine)
vms = append(vms, vm.DeepCopy())
}
}
return vms, nil
}
// addPVC handles the addition of a PVC, enqueuing affected VMIs.
func (c *Controller) addPVC(obj interface{}) {
pvc := obj.(*k8score.PersistentVolumeClaim)
if pvc.DeletionTimestamp != nil {
return
}
vms, err := c.listVMsMatchingPVC(pvc.Namespace, pvc.Name)
if err != nil {
return
}
for _, vm := range vms {
log.Log.V(5).Object(pvc).Infof("PVC created for vm %s", vm.Name)
c.enqueueVm(vm)
}
}
func (c *Controller) addDataVolume(obj interface{}) {
dataVolume := obj.(*cdiv1.DataVolume)
if dataVolume.DeletionTimestamp != nil {
c.deleteDataVolume(dataVolume)
return
}
controllerRef := metav1.GetControllerOf(dataVolume)
if controllerRef != nil {
log.Log.Object(dataVolume).Info("Looking for DataVolume Ref")
vm := c.resolveControllerRef(dataVolume.Namespace, controllerRef)
if vm != nil {
vmKey, err := controller.KeyFunc(vm)
if err != nil {
log.Log.Object(dataVolume).Errorf("Cannot parse key of VM: %s for DataVolume: %s", vm.Name, dataVolume.Name)
} else {
log.Log.Object(dataVolume).Infof("DataVolume created because %s was added.", dataVolume.Name)
c.dataVolumeExpectations.CreationObserved(vmKey)
}
} else {
log.Log.Object(dataVolume).Errorf("Cant find the matching VM for DataVolume: %s", dataVolume.Name)
}
}
c.queueVMsForDataVolume(dataVolume)
}
func (c *Controller) updateDataVolume(old, cur interface{}) {
curDataVolume := cur.(*cdiv1.DataVolume)
oldDataVolume := old.(*cdiv1.DataVolume)
if curDataVolume.ResourceVersion == oldDataVolume.ResourceVersion {
// Periodic resync will send update events for all known DataVolumes.
// Two different versions of the same dataVolume will always
// have different RVs.
return
}
labelChanged := !equality.Semantic.DeepEqual(curDataVolume.Labels, oldDataVolume.Labels)
if curDataVolume.DeletionTimestamp != nil {
// having a DataVolume marked for deletion is enough
// to count as a deletion expectation
c.deleteDataVolume(curDataVolume)
if labelChanged {
// we don't need to check the oldDataVolume.DeletionTimestamp
// because DeletionTimestamp cannot be unset.
c.deleteDataVolume(oldDataVolume)
}
return
}
curControllerRef := metav1.GetControllerOf(curDataVolume)
oldControllerRef := metav1.GetControllerOf(oldDataVolume)
controllerRefChanged := !equality.Semantic.DeepEqual(curControllerRef, oldControllerRef)
if controllerRefChanged && oldControllerRef != nil {
// The ControllerRef was changed. Sync the old controller, if any.
if vm := c.resolveControllerRef(oldDataVolume.Namespace, oldControllerRef); vm != nil {
c.enqueueVm(vm)
}
}
c.queueVMsForDataVolume(curDataVolume)
}
func (c *Controller) deleteDataVolume(obj interface{}) {
dataVolume, ok := obj.(*cdiv1.DataVolume)
// When a delete is dropped, the relist will notice a dataVolume in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the dataVolume
// changed labels the new vmi will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Log.Reason(fmt.Errorf("couldn't get object from tombstone %+v", obj)).Error(failedProcessDeleteNotificationErrMsg)
return
}
dataVolume, ok = tombstone.Obj.(*cdiv1.DataVolume)
if !ok {
log.Log.Reason(fmt.Errorf("tombstone contained object that is not a dataVolume %#v", obj)).Error(failedProcessDeleteNotificationErrMsg)
return
}
}
if controllerRef := metav1.GetControllerOf(dataVolume); controllerRef != nil {
if vm := c.resolveControllerRef(dataVolume.Namespace, controllerRef); vm != nil {
if vmKey, err := controller.KeyFunc(vm); err == nil {
c.dataVolumeExpectations.DeletionObserved(vmKey, controller.DataVolumeKey(dataVolume))
}
}
}
c.queueVMsForDataVolume(dataVolume)
}
func (c *Controller) queueVMsForDataVolume(dataVolume *cdiv1.DataVolume) {
var vmOwner string
if controllerRef := metav1.GetControllerOf(dataVolume); controllerRef != nil {
if vm := c.resolveControllerRef(dataVolume.Namespace, controllerRef); vm != nil {
vmOwner = vm.Name
log.Log.V(4).Object(dataVolume).Infof("DataVolume updated for vm %s", vm.Name)
c.enqueueVm(vm)
}
}
// handle DataVolumes not owned by the VM but referenced in the spec
// TODO come back when DV/PVC name may differ
k, err := controller.KeyFunc(dataVolume)
if err != nil {
log.Log.Object(dataVolume).Errorf("Cannot parse key of DataVolume: %s", dataVolume.Name)
return
}
for _, indexName := range []string{"dv", "pvc"} {
objs, err := c.vmIndexer.ByIndex(indexName, k)
if err != nil {
log.Log.Object(dataVolume).Errorf("Cannot get index %s of DataVolume: %s", indexName, dataVolume.Name)
return
}
for _, obj := range objs {
vm := obj.(*virtv1.VirtualMachine)
if vm.Name != vmOwner {
log.Log.V(4).Object(dataVolume).Infof("DataVolume updated for vm %s", vm.Name)
c.enqueueVm(vm)
}
}
}
}
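// The VirtualMachine add/update/delete handlers below simply enqueue the VM itself.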
func (c *Controller) addVirtualMachine(obj interface{}) {
c.enqueueVm(obj)
}
func (c *Controller) deleteVirtualMachine(obj interface{}) {
c.enqueueVm(obj)
}
func (c *Controller) updateVirtualMachine(_, curr interface{}) {
c.enqueueVm(curr)
}
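// enqueueVm adds the namespace/name key of the given VirtualMachine to the work queue.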
func (c *Controller) enqueueVm(obj interface{}) {
logger := log.Log
vm := obj.(*virtv1.VirtualMachine)
key, err := controller.KeyFunc(vm)
if err != nil {
logger.Object(vm).Reason(err).Error(failedExtractVmkeyFromVmErrMsg)
return
}
c.Queue.Add(key)
}
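// getPatchFinalizerOps builds a JSON patch that first tests the currently observed finalizers and
// then replaces them, so a concurrent modification of the finalizers fails the patch instead of
// being silently overwritten. The generated payload looks roughly like:
// [{"op":"test","path":"/metadata/finalizers","value":[...]},{"op":"replace","path":"/metadata/finalizers","value":[...]}]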
func (c *Controller) getPatchFinalizerOps(oldFinalizers, newFinalizers []string) ([]byte, error) {
return patch.New(
patch.WithTest("/metadata/finalizers", oldFinalizers),
patch.WithReplace("/metadata/finalizers", newFinalizers)).
GeneratePayload()
}
func (c *Controller) removeVMIFinalizer(vmi *virtv1.VirtualMachineInstance) error {
if !controller.HasFinalizer(vmi, virtv1.VirtualMachineControllerFinalizer) {
return nil
}
log.Log.V(3).Object(vmi).Infof("VMI is in a final state. Removing VM controller finalizer")
newFinalizers := []string{}
for _, fin := range vmi.Finalizers {
if fin != virtv1.VirtualMachineControllerFinalizer {
newFinalizers = append(newFinalizers, fin)
}
}
patch, err := c.getPatchFinalizerOps(vmi.Finalizers, newFinalizers)
if err != nil {
return err
}
_, err = c.clientset.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
return err
}
func (c *Controller) removeVMFinalizer(vm *virtv1.VirtualMachine) (*virtv1.VirtualMachine, error) {
if !controller.HasFinalizer(vm, virtv1.VirtualMachineControllerFinalizer) {
return vm, nil
}
log.Log.V(3).Object(vm).Infof("Removing VM controller finalizer: %s", virtv1.VirtualMachineControllerFinalizer)
newFinalizers := []string{}
for _, fin := range vm.Finalizers {
if fin != virtv1.VirtualMachineControllerFinalizer {
newFinalizers = append(newFinalizers, fin)
}
}
patch, err := c.getPatchFinalizerOps(vm.Finalizers, newFinalizers)
if err != nil {
return vm, err
}
vm, err = c.clientset.VirtualMachine(vm.Namespace).Patch(context.Background(), vm.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
return vm, err
}
func (c *Controller) addVMFinalizer(vm *virtv1.VirtualMachine) (*virtv1.VirtualMachine, error) {
if controller.HasFinalizer(vm, virtv1.VirtualMachineControllerFinalizer) {
return vm, nil
}
log.Log.V(3).Object(vm).Infof("Adding VM controller finalizer: %s", virtv1.VirtualMachineControllerFinalizer)
newFinalizers := make([]string, len(vm.Finalizers))
copy(newFinalizers, vm.Finalizers)
newFinalizers = append(newFinalizers, virtv1.VirtualMachineControllerFinalizer)
patch, err := c.getPatchFinalizerOps(vm.Finalizers, newFinalizers)
if err != nil {
return vm, err
}
return c.clientset.VirtualMachine(vm.Namespace).Patch(context.Background(), vm.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
}
// parseGeneration will parse for the last value after a '-'. It is assumed the
// revision name is created with getVMRevisionName. If the name is not formatted
// correctly and the generation cannot be found, then nil will be returned.
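// For example, assuming the naming format produced by getVMRevisionName, a revision named
// "revision-start-vm-<uid>-3" parses to generation 3.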
func parseGeneration(revisionName string, logger *log.FilteredLogger) *int64 {
idx := strings.LastIndexAny(revisionName, "-")
if idx == -1 {
logger.Errorf("Failed to parse generation as an int from revision %v", revisionName)
return nil
}
generationStr := revisionName[idx+1:]
generation, err := strconv.ParseInt(generationStr, 10, 64)
if err != nil {
logger.Reason(err).Errorf("Failed to parse generation as an int from revision %v", revisionName)
return nil
}
return &generation
}
// patchVmGenerationFromControllerRevision will first fetch the generation from
// the corresponding controller revision, and then patch the vmi with the
// generation annotation. If the controller revision does not exist,
// (nil, nil) will be returned.
func (c *Controller) patchVmGenerationFromControllerRevision(vmi *virtv1.VirtualMachineInstance, logger *log.FilteredLogger) (*virtv1.VirtualMachineInstance, *int64, error) {
cr, err := c.getControllerRevision(vmi.Namespace, vmi.Status.VirtualMachineRevisionName)
if err != nil || cr == nil {
return vmi, nil, err
}
generation := parseGeneration(cr.Name, logger)
if generation == nil {
return vmi, nil, nil
}
vmi, err = c.patchVmGenerationAnnotationOnVmi(*generation, vmi)
return vmi, generation, err
}
// syncGenerationInfo will update the vm.Status with the ObservedGeneration
// from the vmi and the DesiredGeneration from the vm current generation.
func (c *Controller) syncGenerationInfo(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance, logger *log.FilteredLogger) (*virtv1.VirtualMachineInstance, error) {
if vm == nil || vmi == nil {
return vmi, errors.New("passed nil pointer")
}
generation, err := getGenerationAnnotationAsInt(vmi, logger)
if err != nil {
return vmi, err
}
// If the generation annotation does not exist, the VMI could have
// been created before the controller was updated. In this case, check the
// ControllerRevision for the latest observed generation and back-fill
// the info onto the vmi annotation.
if generation == nil {
var patchedVMI *virtv1.VirtualMachineInstance
patchedVMI, generation, err = c.patchVmGenerationFromControllerRevision(vmi, logger)
if generation == nil || err != nil {
return vmi, err
}
vmi = patchedVMI
}
vm.Status.ObservedGeneration = *generation
vm.Status.DesiredGeneration = vm.Generation
return vmi, nil
}
func (c *Controller) updateStatus(vm, vmOrig *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance, syncErr common.SyncError, logger *log.FilteredLogger) error {
key := controller.VirtualMachineKey(vmOrig)
defer virtControllerVMWorkQueueTracer.StepTrace(key, "updateStatus", trace.Field{Key: "VM Name", Value: vmOrig.Name})
created := vmi != nil
vm.Status.Created = created
ready := false
if created {
ready = controller.NewVirtualMachineInstanceConditionManager().HasConditionWithStatus(vmi, virtv1.VirtualMachineInstanceReady, k8score.ConditionTrue)
var err error
vmi, err = c.syncGenerationInfo(vm, vmi, logger)
if err != nil {
return err
}
}
vm.Status.Ready = ready
runStrategy, _ := vmOrig.RunStrategy()
// sync for the first time only when the VMI gets created
// so that we can tell if the VM got started at least once
if vm.Status.RunStrategy != "" || vm.Status.Created {
vm.Status.RunStrategy = runStrategy
}
c.trimDoneVolumeRequests(vm)
memorydump.UpdateRequest(vm, vmi)
if c.isTrimFirstChangeRequestNeeded(vm, vmi) {
popStateChangeRequest(vm)
}
syncStartFailureStatus(vm, vmi)
// On a successful migration, the volume change condition is removed and we need to detect the removal before the synchronization of the VMI
// condition to the VM
syncVolumeMigration(vm, vmi)
syncConditions(vm, vmi, syncErr)
c.setPrintableStatus(vm, vmi)
cbt.SyncVMChangedBlockTrackingState(vm, vmi, c.clusterConfig, c.namespaceStore)
// only update if necessary
if !equality.Semantic.DeepEqual(vm.Status, vmOrig.Status) {
if _, err := c.clientset.VirtualMachine(vm.Namespace).UpdateStatus(context.Background(), vm, v1.UpdateOptions{}); err != nil {
return err
}
}
syncVMIDeleted := vmi != nil && vmi.IsWaitingForSync() && vmi.DeletionTimestamp != nil
if vmi != nil && (vmi.IsFinal() || syncVMIDeleted) && len(vmi.Finalizers) > 0 {
// Remove our finalizer off of a finalized VMI now that we've been able
// to record any status info from the VMI onto the VM object.
err := c.removeVMIFinalizer(vmi)
if err != nil {
return err
}
}
return nil
}
func (c *Controller) setPrintableStatus(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) {
// For each status, there's a separate function that evaluates
// whether the status is "true" for the given VM.
//
// Note that these statuses aren't mutually exclusive,
// and several of them can be "true" at the same time
// (e.g., Running && Migrating, or Paused && Terminating).
//
// The actual precedence of these statuses is determined by the order
// of evaluation - first match wins.
statuses := []struct {
statusType virtv1.VirtualMachinePrintableStatus
statusFunc func(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool
}{
{virtv1.VirtualMachineStatusTerminating, c.isVirtualMachineStatusTerminating},
{virtv1.VirtualMachineStatusStopping, c.isVirtualMachineStatusStopping},
{virtv1.VirtualMachineStatusMigrating, c.isVirtualMachineStatusMigrating},
{virtv1.VirtualMachineStatusPaused, c.isVirtualMachineStatusPaused},
{virtv1.VirtualMachineStatusRunning, c.isVirtualMachineStatusRunning},
{virtv1.VirtualMachineStatusPvcNotFound, c.isVirtualMachineStatusPvcNotFound},
{virtv1.VirtualMachineStatusDataVolumeError, c.isVirtualMachineStatusDataVolumeError},
{virtv1.VirtualMachineStatusUnschedulable, c.isVirtualMachineStatusUnschedulable},
{virtv1.VirtualMachineStatusProvisioning, c.isVirtualMachineStatusProvisioning},
{virtv1.VirtualMachineStatusWaitingForVolumeBinding, c.isVirtualMachineStatusWaitingForVolumeBinding},
{virtv1.VirtualMachineStatusErrImagePull, c.isVirtualMachineStatusErrImagePull},
{virtv1.VirtualMachineStatusImagePullBackOff, c.isVirtualMachineStatusImagePullBackOff},
{virtv1.VirtualMachineStatusStarting, c.isVirtualMachineStatusStarting},
{virtv1.VirtualMachineStatusCrashLoopBackOff, c.isVirtualMachineStatusCrashLoopBackOff},
{virtv1.VirtualMachineStatusStopped, c.isVirtualMachineStatusStopped},
{virtv1.VirtualMachineStatusWaitingForReceiver, c.isVirtualMachineWaitingReceiver},
}
for _, status := range statuses {
if status.statusFunc(vm, vmi) {
vm.Status.PrintableStatus = status.statusType
return
}
}
vm.Status.PrintableStatus = virtv1.VirtualMachineStatusUnknown
}
// isVirtualMachineStatusCrashLoopBackOff determines whether the VM status field should be set to "CrashLoop".
func (c *Controller) isVirtualMachineStatusCrashLoopBackOff(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
if vmi != nil && !vmi.IsFinal() {
return false
} else if c.isVMIStartExpected(vm) {
return false
}
runStrategy, err := vm.RunStrategy()
if err != nil {
log.Log.Object(vm).Errorf(fetchingRunStrategyErrFmt, err)
return false
}
if vm.Status.StartFailure != nil &&
vm.Status.StartFailure.ConsecutiveFailCount > 0 &&
(runStrategy == virtv1.RunStrategyAlways || runStrategy == virtv1.RunStrategyRerunOnFailure || runStrategy == virtv1.RunStrategyOnce) {
return true
}
return false
}
// isVirtualMachineStatusStopped determines whether the VM status field should be set to "Stopped".
func (c *Controller) isVirtualMachineStatusStopped(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
if vmi != nil {
return vmi.IsFinal()
}
return !c.isVMIStartExpected(vm)
}
// isVirtualMachineStatusProvisioning determines whether the VM status field should be set to "Provisioning".
func (c *Controller) isVirtualMachineStatusProvisioning(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
return storagetypes.HasDataVolumeProvisioning(vm.Namespace, vm.Spec.Template.Spec.Volumes, c.dataVolumeStore)
}
// isVirtualMachineStatusWaitingForVolumeBinding determines whether the VM status field should be set to "WaitingForVolumeBinding".
func (c *Controller) isVirtualMachineStatusWaitingForVolumeBinding(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
if !isSetToStart(vm, vmi) {
return false
}
return storagetypes.HasUnboundPVC(vm.Namespace, vm.Spec.Template.Spec.Volumes, c.pvcStore)
}
// isVirtualMachineStatusStarting determines whether the VM status field should be set to "Starting".
func (c *Controller) isVirtualMachineStatusStarting(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
if vmi == nil {
return c.isVMIStartExpected(vm)
}
return vmi.IsUnprocessed() || vmi.IsScheduling() || vmi.IsScheduled()
}
// isVirtualMachineStatusRunning determines whether the VM status field should be set to "Running".
func (c *Controller) isVirtualMachineStatusRunning(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
if vmi == nil {
return false
}
hasPausedCondition := controller.NewVirtualMachineInstanceConditionManager().HasConditionWithStatus(vmi,
virtv1.VirtualMachineInstancePaused, k8score.ConditionTrue)
return vmi.IsRunning() && !hasPausedCondition
}
// isVirtualMachineStatusPaused determines whether the VM status field should be set to "Paused".
func (c *Controller) isVirtualMachineStatusPaused(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
if vmi == nil {
return false
}
hasPausedCondition := controller.NewVirtualMachineInstanceConditionManager().HasConditionWithStatus(vmi,
virtv1.VirtualMachineInstancePaused, k8score.ConditionTrue)
return vmi.IsRunning() && hasPausedCondition
}
// isVirtualMachineStatusStopping determines whether the VM status field should be set to "Stopping".
func (c *Controller) isVirtualMachineStatusStopping(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
return vmi != nil && !vmi.IsFinal() &&
(vmi.IsMarkedForDeletion() || c.isVMIStopExpected(vm))
}
// isVirtualMachineStatusTerminating determines whether the VM status field should be set to "Terminating".
func (c *Controller) isVirtualMachineStatusTerminating(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
return vm.ObjectMeta.DeletionTimestamp != nil
}
// isVirtualMachineStatusMigrating determines whether the VM status field should be set to "Migrating".
func (c *Controller) isVirtualMachineStatusMigrating(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
return vmi != nil && migrations.IsMigrating(vmi)
}
// isVirtualMachineStatusUnschedulable determines whether the VM status field should be set to "FailedUnschedulable".
func (c *Controller) isVirtualMachineStatusUnschedulable(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
return controller.NewVirtualMachineInstanceConditionManager().HasConditionWithStatusAndReason(vmi,
virtv1.VirtualMachineInstanceConditionType(k8score.PodScheduled),
k8score.ConditionFalse,
k8score.PodReasonUnschedulable)
}
// isVirtualMachineStatusErrImagePull determines whether the VM status field should be set to "ErrImagePull"
func (c *Controller) isVirtualMachineStatusErrImagePull(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
syncCond := controller.NewVirtualMachineInstanceConditionManager().GetCondition(vmi, virtv1.VirtualMachineInstanceSynchronized)
return syncCond != nil && syncCond.Status == k8score.ConditionFalse && syncCond.Reason == controller.ErrImagePullReason
}
// isVirtualMachineStatusImagePullBackOff determines whether the VM status field should be set to "ImagePullBackOff"
func (c *Controller) isVirtualMachineStatusImagePullBackOff(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
syncCond := controller.NewVirtualMachineInstanceConditionManager().GetCondition(vmi, virtv1.VirtualMachineInstanceSynchronized)
return syncCond != nil && syncCond.Status == k8score.ConditionFalse && syncCond.Reason == controller.ImagePullBackOffReason
}
// isVirtualMachineStatusPvcNotFound determines whether the VM status field should be set to "FailedPvcNotFound".
func (c *Controller) isVirtualMachineStatusPvcNotFound(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
return controller.NewVirtualMachineInstanceConditionManager().HasConditionWithStatusAndReason(vmi,
virtv1.VirtualMachineInstanceSynchronized,
k8score.ConditionFalse,
controller.FailedPvcNotFoundReason)
}
// isVirtualMachineStatusDataVolumeError determines whether the VM status field should be set to "DataVolumeError"
func (c *Controller) isVirtualMachineStatusDataVolumeError(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
err := storagetypes.HasDataVolumeErrors(vm.Namespace, vm.Spec.Template.Spec.Volumes, c.dataVolumeStore)
if err != nil {
log.Log.Object(vm).Errorf("%v", err)
return true
}
return false
}
// isVirtualMachineWaitingReceiver determines whether the VM status field should be set to "WaitingForReceiver"
func (c *Controller) isVirtualMachineWaitingReceiver(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
runStrategy, err := vm.RunStrategy()
if err != nil {
log.Log.Object(vm).Errorf(fetchingRunStrategyErrFmt, err)
return false
}
return (vmi == nil || vmi.IsWaitingForSync()) && runStrategy == virtv1.RunStrategyWaitAsReceiver
}
func syncReadyConditionFromVMI(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) {
conditionManager := controller.NewVirtualMachineConditionManager()
vmiReadyCond := controller.NewVirtualMachineInstanceConditionManager().
GetCondition(vmi, virtv1.VirtualMachineInstanceReady)
now := metav1.Now()
if vmi == nil {
conditionManager.UpdateCondition(vm, &virtv1.VirtualMachineCondition{
Type: virtv1.VirtualMachineReady,
Status: k8score.ConditionFalse,
Reason: "VMINotExists",
Message: "VMI does not exist",
LastProbeTime: now,
LastTransitionTime: now,
})
} else if vmiReadyCond == nil {
conditionManager.UpdateCondition(vm, &virtv1.VirtualMachineCondition{
Type: virtv1.VirtualMachineReady,
Status: k8score.ConditionFalse,
Reason: "VMIConditionMissing",
Message: "VMI is missing the Ready condition",
LastProbeTime: now,
LastTransitionTime: now,
})
} else {
conditionManager.UpdateCondition(vm, &virtv1.VirtualMachineCondition{
Type: virtv1.VirtualMachineReady,
Status: vmiReadyCond.Status,
Reason: vmiReadyCond.Reason,
Message: vmiReadyCond.Message,
LastProbeTime: vmiReadyCond.LastProbeTime,
LastTransitionTime: vmiReadyCond.LastTransitionTime,
})
}
}
func syncConditions(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance, syncErr common.SyncError) {
cm := controller.NewVirtualMachineConditionManager()
// the ready condition is handled differently as it persists regardless of whether the vmi exists
syncReadyConditionFromVMI(vm, vmi)
processFailureCondition(vm, syncErr)
// nothing to do if vmi hasn't been created yet.
if vmi == nil {
return
}
// sync VMI conditions, ignore list represents conditions that are not synced generically
syncIgnoreMap := map[string]interface{}{
string(virtv1.VirtualMachineReady): nil,
string(virtv1.VirtualMachineFailure): nil,
string(virtv1.VirtualMachineRestartRequired): nil,
}
vmiCondMap := make(map[string]interface{})
// generically add/update all vmi conditions
for _, cond := range vmi.Status.Conditions {
_, ignore := syncIgnoreMap[string(cond.Type)]
if ignore {
continue
}
vmiCondMap[string(cond.Type)] = nil
cm.UpdateCondition(vm, &virtv1.VirtualMachineCondition{
Type: virtv1.VirtualMachineConditionType(cond.Type),
Status: cond.Status,
Reason: cond.Reason,
Message: cond.Message,
LastProbeTime: cond.LastProbeTime,
LastTransitionTime: cond.LastTransitionTime,
})
}
// remove vm conditions that don't exist on vmi (excluding the ignore list)
for _, cond := range vm.Status.Conditions {
_, ignore := syncIgnoreMap[string(cond.Type)]
if ignore {
continue
}
_, exists := vmiCondMap[string(cond.Type)]
if !exists {
cm.RemoveCondition(vm, cond.Type)
}
}
}
func processFailureCondition(vm *virtv1.VirtualMachine, syncErr common.SyncError) {
vmConditionManager := controller.NewVirtualMachineConditionManager()
if syncErr == nil {
if vmConditionManager.HasCondition(vm, virtv1.VirtualMachineFailure) {
log.Log.Object(vm).V(4).Info("Removing failure")
vmConditionManager.RemoveCondition(vm, virtv1.VirtualMachineFailure)
}
// nothing to do
return
}
vmConditionManager.UpdateCondition(vm, &virtv1.VirtualMachineCondition{
Type: virtv1.VirtualMachineFailure,
Reason: syncErr.Reason(),
Message: syncErr.Error(),
LastTransitionTime: metav1.Now(),
Status: k8score.ConditionTrue,
})
}
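// isTrimFirstChangeRequestNeeded returns true when the first state change request in the VM status
// has already been satisfied (or can no longer be acted upon) and should therefore be popped.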
func (c *Controller) isTrimFirstChangeRequestNeeded(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) (clearChangeRequest bool) {
if len(vm.Status.StateChangeRequests) == 0 {
return false
}
// Only consider one stateChangeRequest at a time. The second and subsequent change
// requests have not been acted upon by this controller yet!
stateChange := vm.Status.StateChangeRequests[0]
switch stateChange.Action {
case virtv1.StopRequest:
if vmi == nil {
// If there's no VMI, then the VMI was stopped, and the stopRequest can be cleared
log.Log.Object(vm).V(4).Infof("No VMI. Clearing stop request")
return true
} else {
if stateChange.UID == nil {
// It never makes sense to have a request to stop a VMI that doesn't
// have a UUID associated with it. This shouldn't be possible -- but if
// it occurs, clear the stopRequest because it can't be acted upon
log.Log.Object(vm).Errorf("Stop Request has no UID.")
return true
} else if *stateChange.UID != vmi.UID {
// If there is a VMI, but the UID doesn't match, then it
// must have been previously stopped, so the stopRequest can be cleared
log.Log.Object(vm).V(4).Infof("VMI's UID doesn't match. clearing stop request")
return true
}
}
case virtv1.StartRequest:
// Update VMI as the runStrategy might have started/stopped the VM.
// Example: if the runStrategy is `RerunOnFailure` and the VMI just failed
// `syncRunStrategy()` will delete the VMI object and enqueue a StartRequest.
// If we do not update `vmi` by asking the API Server this function could
// erroneously trim the just added StartRequest because it would see a running
// vmi with no DeletionTimestamp
if vmi != nil && vmi.DeletionTimestamp == nil && !vmi.IsFinal() {
log.Log.Object(vm).V(4).Infof("VMI exists. clearing start request")
return true
}
}
return false
}
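// trimDoneVolumeRequests drops volume requests that are already reflected in the VM template:
// an add request whose volume and disk both exist, or a remove request whose volume and disk are both gone.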
func (c *Controller) trimDoneVolumeRequests(vm *virtv1.VirtualMachine) {
if len(vm.Status.VolumeRequests) == 0 {
return
}
volumeMap := make(map[string]virtv1.Volume)
diskMap := make(map[string]virtv1.Disk)
for _, volume := range vm.Spec.Template.Spec.Volumes {
volumeMap[volume.Name] = volume
}
for _, disk := range vm.Spec.Template.Spec.Domain.Devices.Disks {
diskMap[disk.Name] = disk
}
tmpVolRequests := vm.Status.VolumeRequests[:0]
for _, request := range vm.Status.VolumeRequests {
var added bool
var volName string
removeRequest := false
if request.AddVolumeOptions != nil {
volName = request.AddVolumeOptions.Name
added = true
} else if request.RemoveVolumeOptions != nil {
volName = request.RemoveVolumeOptions.Name
added = false
}
_, volExists := volumeMap[volName]
_, diskExists := diskMap[volName]
if added && volExists && diskExists {
removeRequest = true
} else if !added && !volExists && !diskExists {
removeRequest = true
}
if !removeRequest {
tmpVolRequests = append(tmpVolRequests, request)
}
}
vm.Status.VolumeRequests = tmpVolRequests
}
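// validLiveUpdateVolumes returns true if all volume changes between the last seen spec and the
// current spec are live-updatable, i.e. limited to hotpluggable volumes or, with the migration
// update strategy, to PVC/DV source changes.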
func validLiveUpdateVolumes(oldVMSpec *virtv1.VirtualMachineSpec, vm *virtv1.VirtualMachine) bool {
oldVols := storagetypes.GetVolumesByName(&oldVMSpec.Template.Spec)
// Evaluate if any volume has changed or has been added
for _, v := range vm.Spec.Template.Spec.Volumes {
oldVol, okOld := oldVols[v.Name]
switch {
// Changes for hotplugged volumes are valid
case storagetypes.IsHotplugVolume(&v):
delete(oldVols, v.Name)
// The volume has been freshly added
case !okOld:
return false
// if the update strategy is migration the PVC/DV could have
// changed
case (v.VolumeSource.PersistentVolumeClaim != nil || v.VolumeSource.DataVolume != nil) &&
vm.Spec.UpdateVolumesStrategy != nil &&
*vm.Spec.UpdateVolumesStrategy == virtv1.UpdateVolumesStrategyMigration:
delete(oldVols, v.Name)
// The volume has changed
case !equality.Semantic.DeepEqual(*oldVol, v):
return false
default:
delete(oldVols, v.Name)
}
}
// Evaluate if any volumes were removed and they were hotplugged volumes
for _, v := range oldVols {
if !storagetypes.IsHotplugVolume(v) {
return false
}
}
return true
}
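// validLiveUpdateDisks returns true if all disk changes between the last seen spec and the current
// spec concern hotpluggable volumes; removing a CD-ROM disk, or adding/changing a non-hotplug disk,
// requires a restart.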
func validLiveUpdateDisks(oldVMSpec *virtv1.VirtualMachineSpec, vm *virtv1.VirtualMachine) bool {
oldDisks := storagetypes.GetDisksByName(&oldVMSpec.Template.Spec)
oldVols := storagetypes.GetVolumesByName(&oldVMSpec.Template.Spec)
vols := storagetypes.GetVolumesByName(&vm.Spec.Template.Spec)
newDisks := storagetypes.GetDisksByName(&vm.Spec.Template.Spec)
// Evaluate if any disk has changed or has been added
for _, newDisk := range vm.Spec.Template.Spec.Domain.Devices.Disks {
newVolume, okNewVolume := vols[newDisk.Name]
oldDisk, okOldDisk := oldDisks[newDisk.Name]
switch {
// Changes for disks associated with a hotpluggable volume are valid
case okNewVolume && storagetypes.IsHotplugVolume(newVolume):
delete(oldDisks, newDisk.Name)
// The disk has been freshly added
case !okOldDisk:
return false
// The disk has changed
case !equality.Semantic.DeepEqual(*oldDisk, newDisk):
return false
default:
delete(oldDisks, newDisk.Name)
}
}
// Evaluate if any disks were removed and they were hotplugged volumes
for _, oldDisk := range oldDisks {
v, ok := oldVols[oldDisk.Name]
if ok && !storagetypes.IsHotplugVolume(v) {
return false
}
// if a CDRom disk was removed from VM spec, a restart is required
_, newDiskExists := newDisks[oldDisk.Name]
if oldDisk.CDRom != nil && !newDiskExists {
return false
}
}
return true
}
func setRestartRequired(vm *virtv1.VirtualMachine, message string) {
vmConditions := controller.NewVirtualMachineConditionManager()
vmConditions.UpdateCondition(vm, &virtv1.VirtualMachineCondition{
Type: virtv1.VirtualMachineRestartRequired,
LastTransitionTime: metav1.Now(),
Status: k8score.ConditionTrue,
Message: message,
})
}
// addRestartRequiredIfNeeded adds the restartRequired condition to the VM if any non-live-updatable field was changed
func (c *Controller) addRestartRequiredIfNeeded(lastSeenVMSpec *virtv1.VirtualMachineSpec, vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) bool {
if lastSeenVMSpec == nil {
return false
}
// Expand any instance types and preferences associated with lastSeenVMSpec or the current VM before working out if things are live-updatable
currentVM := vm.DeepCopy()
if err := c.instancetypeController.ApplyToVM(currentVM); err != nil {
return false
}
lastSeenVM := &virtv1.VirtualMachine{
// We need the namespace to be populated here for the lookup and application of instance types to work below
ObjectMeta: currentVM.DeepCopy().ObjectMeta,
Spec: *lastSeenVMSpec.DeepCopy(),
}
if err := c.instancetypeController.ApplyToVM(lastSeenVM); err != nil {
return false
}
if validLiveUpdateVolumes(&lastSeenVM.Spec, currentVM) {
lastSeenVM.Spec.Template.Spec.Volumes = currentVM.Spec.Template.Spec.Volumes
}
if validLiveUpdateDisks(&lastSeenVM.Spec, currentVM) {
lastSeenVM.Spec.Template.Spec.Domain.Devices.Disks = currentVM.Spec.Template.Spec.Domain.Devices.Disks
}
// Ignore all the live-updatable fields by copying them over. (If the feature gate is disabled, nothing is live-updatable)
// Note: this list needs to stay up-to-date with everything that can be live-updated
// Note2: destroying lastSeenVM here is fine, we don't need it later
if c.clusterConfig.IsVMRolloutStrategyLiveUpdate() {
if lastSeenVM.Spec.Template.Spec.Domain.CPU != nil && currentVM.Spec.Template.Spec.Domain.CPU != nil {
lastSeenVM.Spec.Template.Spec.Domain.CPU.Sockets = currentVM.Spec.Template.Spec.Domain.CPU.Sockets
}
if currentVM.Spec.Template.Spec.Domain.Memory != nil && currentVM.Spec.Template.Spec.Domain.Memory.Guest != nil {
if lastSeenVM.Spec.Template.Spec.Domain.Memory == nil {
lastSeenVM.Spec.Template.Spec.Domain.Memory = &virtv1.Memory{}
}
lastSeenVM.Spec.Template.Spec.Domain.Memory.Guest = currentVM.Spec.Template.Spec.Domain.Memory.Guest
}
lastSeenVM.Spec.Template.Spec.NodeSelector = currentVM.Spec.Template.Spec.NodeSelector
lastSeenVM.Spec.Template.Spec.Affinity = currentVM.Spec.Template.Spec.Affinity
lastSeenVM.Spec.Template.Spec.Tolerations = currentVM.Spec.Template.Spec.Tolerations
}
if !netvmliveupdate.IsRestartRequired(currentVM, vmi) {
lastSeenVM.Spec.Template.Spec.Domain.Devices.Interfaces = currentVM.Spec.Template.Spec.Domain.Devices.Interfaces
lastSeenVM.Spec.Template.Spec.Networks = currentVM.Spec.Template.Spec.Networks
}
if !equality.Semantic.DeepEqual(lastSeenVM.Spec.Template.Spec, currentVM.Spec.Template.Spec) {
setRestartRequired(vm, "a non-live-updatable field was changed in the template spec")
return true
}
return false
}
// These "dynamic" annotations/labels are VMI annotations/labels which may diverge from the VM over time that we want to keep in sync.
func (c *Controller) syncDynamicAnnotationsAndLabelsToVMI(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) (*virtv1.VirtualMachineInstance, error) {
if vm == nil || vm.Spec.Template == nil || vmi == nil || vmi.DeletionTimestamp != nil {
return vmi, nil
}
patchSet := patch.New()
newVmiAnnotations := maps.Clone(vmi.Annotations)
newVmiLabels := maps.Clone(vmi.Labels)
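// syncMap copies the listed keys from the VM map into the cloned VMI map and, when something
// changed, appends a test+replace patch (or an add, when the VMI map was nil) for the whole
// metadata subPath.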
syncMap := func(keys []string, vmMap, vmiMap, vmiOrigMap map[string]string, subPath string) {
changed := false
for _, key := range keys {
vmVal, vmExists := vmMap[key]
vmiVal, vmiExists := vmiMap[key]
if vmExists == vmiExists && vmVal == vmiVal {
continue
}
changed = true
if vmExists {
vmiMap[key] = vmVal
} else {
delete(vmiMap, key)
}
}
if !changed {
return
}
if vmiOrigMap == nil {
patchSet.AddOption(patch.WithAdd("/metadata/"+subPath, vmiMap))
} else {
patchSet.AddOption(
patch.WithTest("/metadata/"+subPath, vmiOrigMap),
patch.WithReplace("/metadata/"+subPath, vmiMap),
)
}
}
dynamicLabels := []string{}
dynamicLabels = append(dynamicLabels, c.additionalLauncherLabelsSync...)
dynamicAnnotations := []string{descheduler.EvictPodAnnotationKeyAlpha, descheduler.EvictPodAnnotationKeyAlphaPreferNoEviction}
dynamicAnnotations = append(dynamicAnnotations, c.additionalLauncherAnnotationsSync...)
syncMap(
dynamicLabels,
vm.Spec.Template.ObjectMeta.Labels, newVmiLabels, vmi.ObjectMeta.Labels, "labels",
)
syncMap(
dynamicAnnotations,
vm.Spec.Template.ObjectMeta.Annotations, newVmiAnnotations, vmi.ObjectMeta.Annotations, "annotations",
)
if patchSet.IsEmpty() {
return vmi, nil
}
generatedPatch, err := patchSet.GeneratePayload()
if err != nil {
return vmi, err
}
updatedVMI, err := c.clientset.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType, generatedPatch, metav1.PatchOptions{})
if err != nil {
log.Log.Object(vm).Errorf("failed to sync dynamic annotations to VMI: %v", err)
return vmi, err
}
return updatedVMI, nil
}
func (c *Controller) sync(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance, key string) (*virtv1.VirtualMachine, *virtv1.VirtualMachineInstance, common.SyncError, error) {
defer virtControllerVMWorkQueueTracer.StepTrace(key, "sync", trace.Field{Key: "VM Name", Value: vm.Name})
var (
syncErr common.SyncError
err error
startVMSpec *virtv1.VirtualMachineSpec
)
if !c.satisfiedExpectations(key) {
return vm, vmi, nil, nil
}
if vmi != nil {
startVMSpec, err = c.getLastVMRevisionSpec(vm)
if err != nil {
return vm, vmi, nil, err
}
}
if vm.DeletionTimestamp != nil {
if vmi == nil || controller.HasFinalizer(vm, metav1.FinalizerOrphanDependents) {
vm, err = c.removeVMFinalizer(vm)
if err != nil {
return vm, vmi, nil, err
}
} else {
vm, err = c.stopVMI(vm, vmi)
if err != nil {
log.Log.Object(vm).Errorf(failureDeletingVmiErrFormat, err)
return vm, vmi, common.NewSyncError(fmt.Errorf(failureDeletingVmiErrFormat, err), vmiFailedDeleteReason), nil
}
}
return vm, vmi, nil, nil
} else {
vm, err = c.addVMFinalizer(vm)
if err != nil {
return vm, vmi, nil, err
}
}
vmi, err = c.conditionallyBumpGenerationAnnotationOnVmi(vm, vmi)
if err != nil {
return nil, vmi, nil, err
}
// Scale up or down, if all expected creates and deletes were reported by the listener
runStrategy, err := vm.RunStrategy()
if err != nil {
return vm, vmi, common.NewSyncError(fmt.Errorf(fetchingRunStrategyErrFmt, err), failedCreateReason), err
}
// FIXME(lyarwood): Move alongside netSynchronizer
syncedVM, err := c.instancetypeController.Sync(vm, vmi)
if err != nil {
return vm, vmi, handleSynchronizerErr(err), nil
}
if !equality.Semantic.DeepEqual(vm.Spec, syncedVM.Spec) {
return syncedVM, vmi, nil, nil
}
vm.ObjectMeta = syncedVM.ObjectMeta
vm.Spec = syncedVM.Spec
// eventually, would like the condition to be `== "true"`, but for now we need to support legacy behavior by default
if vm.Annotations[virtv1.ImmediateDataVolumeCreation] != "false" {
dataVolumesReady, err := c.handleDataVolumes(vm)
if err != nil {
return vm, vmi, common.NewSyncError(fmt.Errorf("Error encountered while creating DataVolumes: %v", err), failedCreateReason), nil
}
// not sure why we allow proceeding when halted, but preserving legacy behavior
if !dataVolumesReady && runStrategy != virtv1.RunStrategyHalted {
log.Log.Object(vm).V(3).Info("Waiting on DataVolumes to be ready.")
return vm, vmi, nil, nil
}
}
origRunStrategy := vm.Spec.RunStrategy
vm, syncErr = c.syncRunStrategy(vm, vmi, runStrategy)
if syncErr != nil {
return vm, vmi, syncErr, nil
}
restartRequired := c.addRestartRequiredIfNeeded(startVMSpec, vm, vmi)
// Must check satisfiedExpectations again here because a VMI can be created or
// deleted in the startStop function which impacts how we process
// hotplugged volumes and interfaces
if !c.satisfiedExpectations(key) {
return vm, vmi, nil, nil
}
vmCopy := vm.DeepCopy()
vm.Spec.RunStrategy = origRunStrategy
if c.netSynchronizer != nil {
syncedVM, err := c.netSynchronizer.Sync(vmCopy, vmi)
if err != nil {
return vm, vmi, handleSynchronizerErr(err), nil
}
vmCopy.ObjectMeta = syncedVM.ObjectMeta
vmCopy.Spec = syncedVM.Spec
}
if c.firmwareSynchronizer != nil {
syncedVM, err := c.firmwareSynchronizer.Sync(vmCopy, vmi)
if err != nil {
return vm, vmi, handleSynchronizerErr(err), nil
}
vmCopy.ObjectMeta = syncedVM.ObjectMeta
vmCopy.Spec = syncedVM.Spec
}
if err := c.handleVolumeRequests(vmCopy, vmi); err != nil {
return vm, vmi, common.NewSyncError(fmt.Errorf("Error encountered while handling volume hotplug requests: %v", err), hotplugVolumeErrorReason), nil
}
if err := c.handleDeclarativeVolumeHotplug(vmCopy, vmi); err != nil {
return vm, vmi, common.NewSyncError(fmt.Errorf("Error encountered while handling declarative hotplug volumes: %v", err), hotplugVolumeErrorReason), nil
}
if err := memorydump.HandleRequest(c.clientset, vmCopy, vmi, c.pvcStore); err != nil {
return vm, vmi, common.NewSyncError(fmt.Errorf("Error encountered while handling memory dump request: %v", err), memorydump.ErrorReason), nil
}
if vmi, err = c.syncDynamicAnnotationsAndLabelsToVMI(vmCopy, vmi); err != nil {
return vm, vmi, common.NewSyncError(fmt.Errorf("Error encountered while handling annotation and labels sync request: %v", err), annotationsLabelsChangeErrorReason), nil
}
conditionManager := controller.NewVirtualMachineConditionManager()
if c.clusterConfig.IsVMRolloutStrategyLiveUpdate() && !restartRequired && !conditionManager.HasCondition(vm, virtv1.VirtualMachineRestartRequired) {
if err := c.handleCPUChangeRequest(vmCopy, vmi); err != nil {
return vm, vmi, common.NewSyncError(fmt.Errorf("Error encountered while handling CPU change request: %v", err), hotplugCPUErrorReason), nil
}
if err := c.handleAffinityChangeRequest(vmCopy, vmi); err != nil {
return vm, vmi, common.NewSyncError(fmt.Errorf("Error encountered while handling node affinity change request: %v", err), affinityChangeErrorReason), nil
}
if err := c.handleTolerationsChangeRequest(vmCopy, vmi); err != nil {
return vm, vmi, common.NewSyncError(fmt.Errorf("Error encountered while handling tolerations change request: %v", err), tolerationsChangeErrorReason), nil
}
if err := c.handleMemoryHotplugRequest(vmCopy, vmi); err != nil {
return vm, vmi, common.NewSyncError(fmt.Errorf("error encountered while handling memory hotplug requests: %v", err), hotplugMemoryErrorReason), nil
}
if err := c.handleVolumeUpdateRequest(vmCopy, vmi); err != nil {
return vm, vmi, common.NewSyncError(fmt.Errorf("error encountered while handling volumes update requests: %v", err), volumesUpdateErrorReason), nil
}
}
if !equality.Semantic.DeepEqual(vm.Spec, vmCopy.Spec) || !equality.Semantic.DeepEqual(vm.ObjectMeta, vmCopy.ObjectMeta) {
updatedVm, err := c.clientset.VirtualMachine(vmCopy.Namespace).Update(context.Background(), vmCopy, metav1.UpdateOptions{})
if err != nil {
return vm, vmi, common.NewSyncError(fmt.Errorf("Error encountered when trying to update vm according to add volume and/or memory dump requests: %v", err), failedUpdateErrorReason), nil
}
vm = updatedVm
} else {
vm = vmCopy
}
return vm, vmi, nil, nil
}
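// handleSynchronizerErr converts an arbitrary synchronizer error into a common.SyncError,
// preserving the reason when the error already carries one.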
func handleSynchronizerErr(err error) common.SyncError {
if err == nil {
return nil
}
var errWithReason common.SyncError
if errors.As(err, &errWithReason) {
return errWithReason
}
return common.NewSyncError(fmt.Errorf("unsupported error: %v", err), "UnsupportedSyncError")
}
// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (c *Controller) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *virtv1.VirtualMachine {
// We can't look up by UID, so look up by Name and then verify UID.
// Don't even try to look up by Name if it's the wrong Kind.
if controllerRef.Kind != virtv1.VirtualMachineGroupVersionKind.Kind {
return nil
}
vm, exists, err := c.vmIndexer.GetByKey(controller.NamespacedKey(namespace, controllerRef.Name))
if err != nil {
return nil
}
if !exists {
return nil
}
if vm.(*virtv1.VirtualMachine).UID != controllerRef.UID {
// The controller we found with this Name is not the same one that the
// ControllerRef points to.
return nil
}
return vm.(*virtv1.VirtualMachine)
}
func AutoAttachInputDevice(vmi *virtv1.VirtualMachineInstance) {
autoAttachInput := vmi.Spec.Domain.Devices.AutoattachInputDevice
// Return if the field is nil or false (defaulting to false), or if input devices are already present
if autoAttachInput == nil || !*autoAttachInput || len(vmi.Spec.Domain.Devices.Inputs) > 0 {
return
}
// Only add the device with an alias here. Preferences for the bus and type might
// be applied later and if not the VMI mutation webhook will apply defaults for both.
vmi.Spec.Domain.Devices.Inputs = append(
vmi.Spec.Domain.Devices.Inputs,
virtv1.Input{
Name: "default-0",
},
)
}
func (c *Controller) handleMemoryHotplugRequest(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) error {
if vmi == nil || vmi.DeletionTimestamp != nil {
return nil
}
vmCopyWithInstancetype := vm.DeepCopy()
if err := c.instancetypeController.ApplyToVM(vmCopyWithInstancetype); err != nil {
return err
}
if vmCopyWithInstancetype.Spec.Template.Spec.Domain.Memory == nil ||
vmCopyWithInstancetype.Spec.Template.Spec.Domain.Memory.Guest == nil ||
vmi.Spec.Domain.Memory == nil ||
vmi.Spec.Domain.Memory.Guest == nil ||
vmi.Status.Memory == nil ||
vmi.Status.Memory.GuestCurrent == nil {
return nil
}
conditionManager := controller.NewVirtualMachineInstanceConditionManager()
if conditionManager.HasConditionWithStatus(vmi, virtv1.VirtualMachineInstanceMemoryChange, k8score.ConditionFalse) {
setRestartRequired(vm, "memory updated in template spec. Memory-hotplug failed and is not available for this VM configuration")
return nil
}
if vmCopyWithInstancetype.Spec.Template.Spec.Domain.Memory.Guest.Equal(*vmi.Spec.Domain.Memory.Guest) {
return nil
}
if !vmi.IsMigratable() {
setRestartRequired(vm, "memory updated in template spec. Memory-hotplug is only available for migratable VMs")
return nil
}
if vmi.Spec.Domain.Memory.MaxGuest == nil {
setRestartRequired(vm, "memory updated in template spec. Memory-hotplug is not available for this VM configuration")
return nil
}
if conditionManager.HasConditionWithStatus(vmi,
virtv1.VirtualMachineInstanceMemoryChange, k8score.ConditionTrue) {
return fmt.Errorf("another memory hotplug is in progress")
}
if migrations.IsMigrating(vmi) {
return fmt.Errorf("memory hotplug is not allowed while VMI is migrating")
}
if err := memory.ValidateLiveUpdateMemory(&vmCopyWithInstancetype.Spec.Template.Spec, vmi.Spec.Domain.Memory.MaxGuest); err != nil {
setRestartRequired(vm, fmt.Sprintf("memory hotplug not supported, %s", err.Error()))
return nil
}
if vmCopyWithInstancetype.Spec.Template.Spec.Domain.Memory.Guest != nil && vmi.Status.Memory.GuestAtBoot != nil &&
vmCopyWithInstancetype.Spec.Template.Spec.Domain.Memory.Guest.Cmp(*vmi.Status.Memory.GuestAtBoot) == -1 {
setRestartRequired(vm, "memory updated in template spec to a value lower than what the VM started with")
return nil
}
// If the following is true, MaxGuest was calculated, not manually specified (or the validation webhook would have rejected the change).
// Since we're here, we can also assume MaxGuest was not changed in the VM spec since last boot.
// Therefore, bumping Guest to a value higher than MaxGuest is fine, it just requires a reboot.
if vmCopyWithInstancetype.Spec.Template.Spec.Domain.Memory.Guest != nil && vmi.Spec.Domain.Memory.MaxGuest != nil &&
vmCopyWithInstancetype.Spec.Template.Spec.Domain.Memory.Guest.Cmp(*vmi.Spec.Domain.Memory.MaxGuest) == 1 {
setRestartRequired(vm, "memory updated in template spec to a value higher than what's available")
return nil
}
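// Compute the hotplug delta between the desired and the current guest memory; as a sketch,
// growing the guest from 2Gi to 4Gi yields a 2Gi delta that is also added to the requests
// (and limits, when set) patched below.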
memoryDelta := resource.NewQuantity(vmCopyWithInstancetype.Spec.Template.Spec.Domain.Memory.Guest.Value()-vmi.Status.Memory.GuestCurrent.Value(), resource.BinarySI)
patchSet := patch.New(
patch.WithTest("/spec/domain/memory/guest", vmi.Spec.Domain.Memory.Guest.String()),
patch.WithReplace("/spec/domain/memory/guest", vmCopyWithInstancetype.Spec.Template.Spec.Domain.Memory.Guest.String()),
)
logMsg := fmt.Sprintf("hotplugging memory to %s", vmCopyWithInstancetype.Spec.Template.Spec.Domain.Memory.Guest.String())
if !vmi.Spec.Domain.Resources.Requests.Memory().IsZero() {
newMemoryReq := vmi.Spec.Domain.Resources.Requests.Memory().DeepCopy()
newMemoryReq.Add(*memoryDelta)
// check that the new memory requests are at least equal to the memory requested in handleMemoryHotplugRequest;
// this is necessary as weirdness can arise after hot-unplugs, since not all memory is guaranteed to be released on hot-unplug.
if newMemoryReq.Cmp(*vmCopyWithInstancetype.Spec.Template.Spec.Domain.Memory.Guest) == -1 {
newMemoryReq = *vmCopyWithInstancetype.Spec.Template.Spec.Domain.Memory.Guest
// adjusting memoryDelta too for the new limits computation (if required)
memoryDelta = resource.NewQuantity(vmCopyWithInstancetype.Spec.Template.Spec.Domain.Memory.Guest.Value()-newMemoryReq.Value(), resource.BinarySI)
}
patchSet.AddOption(
patch.WithTest("/spec/domain/resources/requests/memory", vmi.Spec.Domain.Resources.Requests.Memory().String()),
patch.WithReplace("/spec/domain/resources/requests/memory", newMemoryReq.String()),
)
logMsg = fmt.Sprintf("%s, setting requests to %s", logMsg, newMemoryReq.String())
}
if !vmCopyWithInstancetype.Spec.Template.Spec.Domain.Resources.Limits.Memory().IsZero() {
newMemoryLimit := vmi.Spec.Domain.Resources.Limits.Memory().DeepCopy()
newMemoryLimit.Add(*memoryDelta)
patchSet.AddOption(
patch.WithTest("/spec/domain/resources/limits/memory", vmi.Spec.Domain.Resources.Limits.Memory().String()),
patch.WithReplace("/spec/domain/resources/limits/memory", newMemoryLimit.String()),
)
logMsg = fmt.Sprintf("%s, setting limits to %s", logMsg, newMemoryLimit.String())
}
patchBytes, err := patchSet.GeneratePayload()
if err != nil {
return err
}
_, err = c.clientset.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType, patchBytes, v1.PatchOptions{})
if err != nil {
return err
}
log.Log.Object(vmi).Infof("%s", logMsg)
return nil
}
func (c *Controller) handleDeclarativeVolumeHotplug(vm *virtv1.VirtualMachine, vmi *virtv1.VirtualMachineInstance) error {
if c.clusterConfig.HotplugVolumesEnabled() || !c.clusterConfig.DeclarativeHotplugVolumesEnabled() {
log.Log.Object(vm).V(4).Info("Declarative hotplug volumes are not enabled, skipping")
return nil
}
return storagehotplug.HandleDeclarativeVolumes(c.clientset, vm, vmi)
}
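// handleKubeVirtUpdate re-queues every VM when the changed block tracking (CBT) label selectors
// in the KubeVirt configuration change, since CBT state may need to be re-evaluated per VM.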
func (c *Controller) handleKubeVirtUpdate(oldObj, newObj interface{}) {
okv, ok := oldObj.(*virtv1.KubeVirt)
if !ok {
return
}
nkv, ok := newObj.(*virtv1.KubeVirt)
if !ok {
return
}
oldCBTSelectors := okv.Spec.Configuration.ChangedBlockTrackingLabelSelectors
newCBTSelectors := nkv.Spec.Configuration.ChangedBlockTrackingLabelSelectors
if equality.Semantic.DeepEqual(oldCBTSelectors, newCBTSelectors) {
return
}
// In case the ChangedBlockTrackingLabelSelectors has changed,
// we need to re-queue all the VMs as the CBT might have changed for them
keys := c.vmIndexer.ListKeys()
for _, key := range keys {
c.Queue.Add(key)
}
}
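// handleNamespaceUpdate re-queues the VMs of a namespace when its labels start or stop matching
// the changed block tracking namespace label selector.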
func (c *Controller) handleNamespaceUpdate(oldObj, newObj interface{}) {
oldNS, ok := oldObj.(*k8score.Namespace)
if !ok {
return
}
newNS, ok := newObj.(*k8score.Namespace)
if !ok {
return
}
oldNSLabels := oldNS.Labels
newNSLabels := newNS.Labels
if equality.Semantic.DeepEqual(oldNSLabels, newNSLabels) {
return
}
labelSelectors := c.clusterConfig.GetConfig().ChangedBlockTrackingLabelSelectors
if labelSelectors == nil {
return
}
namespaceSelector := labelSelectors.NamespaceLabelSelector
if namespaceSelector == nil {
return
}
nsSelector, err := metav1.LabelSelectorAsSelector(namespaceSelector)
if err != nil {
return
}
if nsSelector.Matches(labels.Set(oldNS.Labels)) ==
nsSelector.Matches(labels.Set(newNS.Labels)) {
return
}
vmKeys, err := c.vmIndexer.IndexKeys(cache.NamespaceIndex, newNS.Name)
if err != nil {
return
}
for _, vmKey := range vmKeys {
c.Queue.Add(vmKey)
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package vmi
import (
"fmt"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/client-go/tools/cache"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
"kubevirt.io/kubevirt/pkg/controller"
storagetypes "kubevirt.io/kubevirt/pkg/storage/types"
"kubevirt.io/kubevirt/pkg/virt-controller/watch/common"
)
// addDataVolume handles the addition of a DataVolume, enqueuing affected VMIs.
func (c *Controller) addDataVolume(obj interface{}) {
dataVolume := obj.(*cdiv1.DataVolume)
if dataVolume.DeletionTimestamp != nil {
c.deleteDataVolume(dataVolume)
return
}
vmis, err := c.listVMIsMatchingDV(dataVolume.Namespace, dataVolume.Name)
if err != nil {
return
}
for _, vmi := range vmis {
log.Log.V(4).Object(dataVolume).Infof("DataVolume created for vmi %s", vmi.Name)
c.enqueueVirtualMachine(vmi)
}
}
// updateDataVolume handles updates to a DataVolume, enqueuing affected VMIs.
func (c *Controller) updateDataVolume(old, cur interface{}) {
curDataVolume := cur.(*cdiv1.DataVolume)
oldDataVolume := old.(*cdiv1.DataVolume)
if curDataVolume.ResourceVersion == oldDataVolume.ResourceVersion {
// Periodic resync will send update events for all known DataVolumes.
// Two different versions of the same dataVolume will always
// have different RVs.
return
}
if curDataVolume.DeletionTimestamp != nil {
labelChanged := !equality.Semantic.DeepEqual(curDataVolume.Labels, oldDataVolume.Labels)
// having a DataVolume marked for deletion is enough
// to count as a deletion expectation
c.deleteDataVolume(curDataVolume)
if labelChanged {
// we don't need to check the oldDataVolume.DeletionTimestamp
// because DeletionTimestamp cannot be unset.
c.deleteDataVolume(oldDataVolume)
}
return
}
vmis, err := c.listVMIsMatchingDV(curDataVolume.Namespace, curDataVolume.Name)
if err != nil {
log.Log.Object(curDataVolume).Errorf("Error encountered during datavolume update: %v", err)
return
}
for _, vmi := range vmis {
log.Log.V(4).Object(curDataVolume).Infof("DataVolume updated for vmi %s", vmi.Name)
c.enqueueVirtualMachine(vmi)
}
}
// deleteDataVolume handles the deletion of a DataVolume, enqueuing affected VMIs.
func (c *Controller) deleteDataVolume(obj interface{}) {
dataVolume, ok := obj.(*cdiv1.DataVolume)
// When a delete is dropped, the relist will notice a dataVolume in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the dataVolume
// changed labels the new vmi will not be woken up until the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Log.Reason(fmt.Errorf(tombstoneGetObjectErrFmt, obj)).Error(deleteNotifFailed)
return
}
dataVolume, ok = tombstone.Obj.(*cdiv1.DataVolume)
if !ok {
log.Log.Reason(fmt.Errorf("tombstone contained object that is not a DataVolume %#v", obj)).Error(deleteNotifFailed)
return
}
}
vmis, err := c.listVMIsMatchingDV(dataVolume.Namespace, dataVolume.Name)
if err != nil {
return
}
for _, vmi := range vmis {
log.Log.V(4).Object(dataVolume).Infof("DataVolume deleted for vmi %s", vmi.Name)
c.enqueueVirtualMachine(vmi)
}
}
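// resolveDataVolume is a minimal sketch (illustrative only, not called by the
// controller) of the tombstone unwrap performed above: informer delete
// notifications may deliver either the object itself or a
// cache.DeletedFinalStateUnknown wrapper holding the last known, possibly
// stale, state of the deleted object.
func resolveDataVolume(obj interface{}) (*cdiv1.DataVolume, bool) {
if dv, ok := obj.(*cdiv1.DataVolume); ok {
return dv, true
}
if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
dv, ok := tombstone.Obj.(*cdiv1.DataVolume)
return dv, ok
}
return nil, false
}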
func (c *Controller) areDataVolumesReady(vmi *v1.VirtualMachineInstance, dataVolumes []*cdiv1.DataVolume) (bool, bool, common.SyncError) {
ready := true
wffc := false
for _, volume := range vmi.Spec.Volumes {
// Check both DVs and PVCs
if (volume.VolumeSource.DataVolume != nil && !volume.VolumeSource.DataVolume.Hotpluggable) ||
(volume.VolumeSource.PersistentVolumeClaim != nil && !volume.VolumeSource.PersistentVolumeClaim.Hotpluggable) {
volumeReady, volumeWffc, err := storagetypes.VolumeReadyToAttachToNode(vmi.Namespace, volume, dataVolumes, c.dataVolumeIndexer, c.pvcIndexer)
if err != nil {
if _, ok := err.(storagetypes.PvcNotFoundError); ok {
// Due to the eventually consistent nature of controllers, CDI or the user may need some time to actually create the PVC.
// We wait for it to appear.
c.recorder.Eventf(vmi, k8sv1.EventTypeNormal, controller.FailedPvcNotFoundReason, "PVC %s/%s does not exist, waiting for it to appear", vmi.Namespace, storagetypes.PVCNameFromVirtVolume(&volume))
return false, false, &informalSyncError{err: fmt.Errorf("PVC %s/%s does not exist, waiting for it to appear", vmi.Namespace, storagetypes.PVCNameFromVirtVolume(&volume)), reason: controller.FailedPvcNotFoundReason}
} else {
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, controller.FailedPvcNotFoundReason, "Error determining if volume is ready: %v", err)
return false, false, common.NewSyncError(fmt.Errorf("Error determining if volume is ready: %v", err), controller.FailedDataVolumeImportReason)
}
}
wffc = wffc || volumeWffc
// Ready only becomes false if WFFC is also false.
ready = ready && (volumeReady || volumeWffc)
}
}
return ready, wffc, nil
}
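// foldVolumeReadiness is an illustrative restating (not used by the
// controller) of the accumulation above: WaitForFirstConsumer volumes do not
// block overall readiness, because a temporary pod will be scheduled to bind
// them; readiness only drops when a volume is neither ready nor WFFC.
func foldVolumeReadiness(perVolume []struct{ ready, wffc bool }) (ready, wffc bool) {
ready = true
for _, v := range perVolume {
wffc = wffc || v.wffc
ready = ready && (v.ready || v.wffc)
}
return ready, wffc
}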
func aggregateDataVolumesConditions(vmiCopy *v1.VirtualMachineInstance, dvs []*cdiv1.DataVolume) {
if len(dvs) == 0 {
return
}
dvsReadyCondition := v1.VirtualMachineInstanceCondition{
Status: k8sv1.ConditionTrue,
Type: v1.VirtualMachineInstanceDataVolumesReady,
Reason: v1.VirtualMachineInstanceReasonAllDVsReady,
Message: "All of the VMI's DVs are bound and ready",
}
for _, dv := range dvs {
cStatus := statusOfReadyCondition(dv.Status.Conditions)
if cStatus != k8sv1.ConditionTrue {
dvsReadyCondition.Reason = v1.VirtualMachineInstanceReasonNotAllDVsReady
if cStatus == k8sv1.ConditionFalse {
dvsReadyCondition.Status = cStatus
} else if dvsReadyCondition.Status == k8sv1.ConditionTrue {
dvsReadyCondition.Status = cStatus
}
}
}
if dvsReadyCondition.Status != k8sv1.ConditionTrue {
dvsReadyCondition.Message = "Not all of the VMI's DVs are ready"
}
vmiConditions := controller.NewVirtualMachineInstanceConditionManager()
vmiConditions.UpdateCondition(vmiCopy, &dvsReadyCondition)
}
func statusOfReadyCondition(conditions []cdiv1.DataVolumeCondition) k8sv1.ConditionStatus {
for _, condition := range conditions {
if condition.Type == cdiv1.DataVolumeReady {
return condition.Status
}
}
return k8sv1.ConditionUnknown
}
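// For illustration, the aggregation in aggregateDataVolumesConditions behaves
// like a three-valued AND over the per-DataVolume Ready conditions: any False
// forces the VMI condition to False, any Unknown (without a False) yields
// Unknown, and only all-True stays True. A minimal sketch of the same fold:
func exampleAggregateReady(dvs []*cdiv1.DataVolume) k8sv1.ConditionStatus {
agg := k8sv1.ConditionTrue
for _, dv := range dvs {
switch statusOfReadyCondition(dv.Status.Conditions) {
case k8sv1.ConditionFalse:
return k8sv1.ConditionFalse // False dominates and cannot be overwritten
case k8sv1.ConditionUnknown:
agg = k8sv1.ConditionUnknown // Unknown beats True but not False
}
}
return agg
}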
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package vmi
import (
"context"
"encoding/json"
"errors"
"fmt"
"maps"
"slices"
"strings"
"time"
k8sv1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/client-go/tools/cache"
"k8s.io/utils/trace"
virtv1 "kubevirt.io/api/core/v1"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
"k8s.io/apimachinery/pkg/api/equality"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/controller"
"kubevirt.io/kubevirt/pkg/pointer"
backendstorage "kubevirt.io/kubevirt/pkg/storage/backend-storage"
storagetypes "kubevirt.io/kubevirt/pkg/storage/types"
"kubevirt.io/kubevirt/pkg/util"
"kubevirt.io/kubevirt/pkg/util/hardware"
"kubevirt.io/kubevirt/pkg/util/migrations"
"kubevirt.io/kubevirt/pkg/virt-controller/services"
"kubevirt.io/kubevirt/pkg/virt-controller/watch/common"
"kubevirt.io/kubevirt/pkg/virt-controller/watch/descheduler"
"kubevirt.io/kubevirt/pkg/virt-controller/watch/topology"
)
func (c *Controller) sync(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod, dataVolumes []*cdiv1.DataVolume) (common.SyncError, *k8sv1.Pod) {
key := controller.VirtualMachineInstanceKey(vmi)
defer virtControllerVMIWorkQueueTracer.StepTrace(key, "sync", trace.Field{Key: "VMI Name", Value: vmi.Name})
if vmi.DeletionTimestamp != nil {
err := c.deleteAllMatchingPods(vmi)
if err != nil {
return common.NewSyncError(fmt.Errorf("failed to delete pod: %v", err), controller.FailedDeletePodReason), pod
}
return nil, pod
}
if vmi.IsFinal() {
err := c.deleteAllAttachmentPods(vmi)
if err != nil {
return common.NewSyncError(fmt.Errorf("failed to delete attachment pods: %v", err), controller.FailedHotplugSyncReason), pod
}
return nil, pod
}
if err := c.deleteOrphanedAttachmentPods(vmi); err != nil {
log.Log.Reason(err).Errorf("failed to delete orphaned attachment pods %s: %v", key, err)
// do not return; just log the error
}
dataVolumesReady, isWaitForFirstConsumer, syncErr := c.areDataVolumesReady(vmi, dataVolumes)
if syncErr != nil {
return syncErr, pod
}
if vmi.IsMigrationTarget() && !vmi.IsMigrationTargetNodeLabelSet() {
// The VMI is a migration-target (receiver) VMI that is not fully synced yet; wait for the sync to complete
return nil, pod
}
if !controller.PodExists(pod) {
// If we ever got far enough to detect that we already created a pod, we don't create it again
if !vmi.IsUnprocessed() {
return nil, pod
}
// let's check if we already have topology hints or if we are still waiting for them
if vmi.Status.TopologyHints == nil && c.topologyHinter.IsTscFrequencyRequired(vmi) {
log.Log.V(3).Object(vmi).Infof("Delaying pod creation until topology hints are set")
return nil, pod
}
// ensure that all dataVolumes associated with the VMI are ready before creating the pod
if !dataVolumesReady {
log.Log.V(3).Object(vmi).Infof("Delaying pod creation while DataVolume populates or while we wait for PVCs to appear.")
return nil, pod
}
// ensure the VMI doesn't have an unfinished migration before creating the pod
activeMigration, err := migrations.ActiveMigrationExistsForVMI(c.migrationIndexer, vmi)
if err != nil {
return common.NewSyncError(err, controller.FailedCreatePodReason), pod
}
if activeMigration {
log.Log.V(3).Object(vmi).Infof("Delaying pod creation because an active migration exists for the VMI.")
// We still need to return an error to ensure the VMI gets re-enqueued
return common.NewSyncError(fmt.Errorf("active migration exists"), controller.FailedCreatePodReason), pod
}
backendStoragePVCName, syncErr := c.handleBackendStorage(vmi)
if syncErr != nil {
return syncErr, pod
}
// If a backend-storage PVC was just created but not yet seen by the informer, give it time
if !c.pvcExpectations.SatisfiedExpectations(key) {
return nil, pod
}
backendStorageReady, err := c.backendStorage.IsPVCReady(vmi, backendStoragePVCName)
if err != nil {
return common.NewSyncError(err, controller.FailedBackendStorageProbeReason), pod
}
if !backendStorageReady {
log.Log.V(2).Object(vmi).Infof("Delaying pod creation while backend storage populates.")
return common.NewSyncError(fmt.Errorf("PVC pending"), controller.BackendStorageNotReadyReason), pod
}
var templatePod *k8sv1.Pod
if isWaitForFirstConsumer {
log.Log.V(3).Object(vmi).Infof("Scheduling temporary pod for WaitForFirstConsumer DV")
templatePod, err = c.templateService.RenderLaunchManifestNoVm(vmi)
} else {
templatePod, err = c.templateService.RenderLaunchManifest(vmi)
}
if _, ok := err.(storagetypes.PvcNotFoundError); ok {
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, controller.FailedPvcNotFoundReason, services.FailedToRenderLaunchManifestErrFormat, err)
return &informalSyncError{fmt.Errorf(services.FailedToRenderLaunchManifestErrFormat, err), controller.FailedPvcNotFoundReason}, pod
} else if err != nil {
return common.NewSyncError(fmt.Errorf(services.FailedToRenderLaunchManifestErrFormat, err), controller.FailedCreatePodReason), pod
}
var validateErrors []error
for _, cause := range c.validateNetworkSpec(k8sfield.NewPath("spec"), &vmi.Spec, c.clusterConfig) {
validateErrors = append(validateErrors, errors.New(cause.String()))
}
if validateErr := errors.Join(validateErrors...); validateErr != nil {
return common.NewSyncError(fmt.Errorf("failed create validation: %v", validateErr), "FailedCreateValidation"), pod
}
vmiKey := controller.VirtualMachineInstanceKey(vmi)
pod, err := c.createPod(vmiKey, vmi.Namespace, templatePod)
if k8serrors.IsForbidden(err) && strings.Contains(err.Error(), "violates PodSecurity") {
psaErr := fmt.Errorf("failed to create pod for vmi %s/%s, it needs a privileged namespace to run: %w", vmi.GetNamespace(), vmi.GetName(), err)
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, controller.FailedCreatePodReason, services.FailedToRenderLaunchManifestErrFormat, psaErr)
return common.NewSyncError(psaErr, controller.FailedCreatePodReason), nil
}
if err != nil {
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, controller.FailedCreatePodReason, "Error creating pod: %v", err)
return common.NewSyncError(fmt.Errorf("failed to create virtual machine pod: %v", err), controller.FailedCreatePodReason), nil
}
c.recorder.Eventf(vmi, k8sv1.EventTypeNormal, controller.SuccessfulCreatePodReason, "Created virtual machine pod %s", pod.Name)
return nil, pod
}
if !isWaitForFirstConsumer {
err := c.cleanupWaitForFirstConsumerTemporaryPods(vmi, pod)
if err != nil {
return common.NewSyncError(fmt.Errorf("failed to clean up temporary pods: %v", err), controller.FailedHotplugSyncReason), pod
}
}
if !isTempPod(pod) && controller.IsPodReady(pod) {
newAnnotations := map[string]string{descheduler.EvictOnlyAnnotation: ""}
maps.Copy(newAnnotations, c.netAnnotationsGenerator.GenerateFromActivePod(vmi, pod))
patchedPod, err := controller.SyncPodAnnotations(c.clientset, pod, newAnnotations)
if err != nil {
return common.NewSyncError(err, controller.FailedPodPatchReason), pod
}
pod = patchedPod
_, podIsMarkedForEviction := pod.GetAnnotations()[descheduler.EvictionInProgressAnnotation]
if vmi.IsMarkedForEviction() && !podIsMarkedForEviction {
patchedPod, err = descheduler.MarkEvictionInProgress(c.clientset, pod)
if err != nil {
return common.NewSyncError(err, controller.FailedPodPatchReason), pod
}
}
if !vmi.IsMarkedForEviction() && podIsMarkedForEviction {
patchedPod, err = descheduler.MarkEvictionCompleted(c.clientset, pod)
if err != nil {
return common.NewSyncError(err, controller.FailedPodPatchReason), pod
}
}
pod = patchedPod
hotplugVolumes := storagetypes.GetHotplugVolumes(vmi, pod)
hotplugAttachmentPods, err := controller.AttachmentPods(pod, c.podIndexer)
if err != nil {
return common.NewSyncError(fmt.Errorf("failed to get attachment pods: %v", err), controller.FailedHotplugSyncReason), pod
}
if pod.DeletionTimestamp == nil && needsHandleHotplug(hotplugVolumes, hotplugAttachmentPods) {
hotplugSyncErr := c.handleHotplugVolumes(hotplugVolumes, hotplugAttachmentPods, vmi, pod, dataVolumes)
if hotplugSyncErr != nil {
if hotplugSyncErr.Reason() == controller.MissingAttachmentPodReason {
// We are missing an essential hotplug pod. Delete all pods associated with the VMI.
if err := c.deleteAllMatchingPods(vmi); err != nil {
log.Log.Warningf("failed to deleted VMI %s pods: %v", vmi.GetUID(), err)
}
} else {
return hotplugSyncErr, pod
}
}
}
}
return nil, pod
}
func (c *Controller) getOwnerVM(vmi *virtv1.VirtualMachineInstance) *virtv1.VirtualMachine {
controllerRef := v1.GetControllerOf(vmi)
if controllerRef == nil || controllerRef.Kind != virtv1.VirtualMachineGroupVersionKind.Kind {
return nil
}
obj, exists, _ := c.vmStore.GetByKey(controller.NamespacedKey(vmi.Namespace, controllerRef.Name))
if !exists {
return nil
}
ownerVM := obj.(*virtv1.VirtualMachine)
if controllerRef.UID == ownerVM.UID {
return ownerVM.DeepCopy()
}
return nil
}
// updateStatus handles the VMI's lifecycle status updates.
func (c *Controller) updateStatus(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod, dataVolumes []*cdiv1.DataVolume, syncErr common.SyncError) error {
key := controller.VirtualMachineInstanceKey(vmi)
defer virtControllerVMIWorkQueueTracer.StepTrace(key, "updateStatus", trace.Field{Key: "VMI Name", Value: vmi.Name})
hasFailedDataVolume := storagetypes.HasFailedDataVolumes(dataVolumes)
// there is no reason to check for waitForFirstConsumer if there are failed DVs
hasWffcDataVolume := !hasFailedDataVolume && storagetypes.HasWFFCDataVolumes(dataVolumes)
conditionManager := controller.NewVirtualMachineInstanceConditionManager()
podConditionManager := controller.NewPodConditionManager()
vmiCopy := vmi.DeepCopy()
vmiPodExists := controller.PodExists(pod) && !isTempPod(pod)
tempPodExists := controller.PodExists(pod) && isTempPod(pod)
vmiCopy, err := c.setActivePods(vmiCopy)
if err != nil {
return fmt.Errorf("Error detecting vmi pods: %v", err)
}
c.syncReadyConditionFromPod(vmiCopy, pod)
if vmiPodExists {
var foundImage string
for _, container := range pod.Spec.Containers {
if container.Name == "compute" {
foundImage = container.Image
break
}
}
vmiCopy = c.setLauncherContainerInfo(vmiCopy, foundImage)
if err := c.syncPausedConditionToPod(vmiCopy, pod); err != nil {
return fmt.Errorf("error syncing paused condition to pod: %v", err)
}
if pod, err = c.syncDynamicAnnotationsAndLabelsToPod(vmiCopy, pod); err != nil {
return fmt.Errorf("error syncing annotations and labels to pod: %v", err)
}
}
aggregateDataVolumesConditions(vmiCopy, dataVolumes)
if pvc := backendstorage.PVCForVMI(c.pvcIndexer, vmi); pvc != nil {
c.backendStorage.UpdateVolumeStatus(vmiCopy, pvc)
}
switch {
case vmi.IsUnprocessed():
if vmiPodExists {
vmiCopy.Status.Phase = virtv1.Scheduling
} else if vmi.DeletionTimestamp != nil || hasFailedDataVolume {
vmiCopy.Status.Phase = virtv1.Failed
} else if vmi.IsMigrationTarget() && !vmi.IsMigrationTargetNodeLabelSet() {
vmiCopy.Status.Phase = virtv1.WaitingForSync
} else {
vmiCopy.Status.Phase = virtv1.Pending
if vmi.Status.TopologyHints == nil {
if topologyHints, tscRequirement, err := c.topologyHinter.TopologyHintsForVMI(vmi); err != nil && tscRequirement == topology.RequiredForBoot {
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, controller.FailedGatherhingClusterTopologyHints, err.Error())
return common.NewSyncError(err, controller.FailedGatherhingClusterTopologyHints)
} else if topologyHints != nil {
vmiCopy.Status.TopologyHints = topologyHints
}
}
if hasWffcDataVolume {
condition := virtv1.VirtualMachineInstanceCondition{
Type: virtv1.VirtualMachineInstanceProvisioning,
Status: k8sv1.ConditionTrue,
}
if !conditionManager.HasCondition(vmiCopy, condition.Type) {
vmiCopy.Status.Conditions = append(vmiCopy.Status.Conditions, condition)
}
if tempPodExists {
// Add PodScheduled False condition to the VM
if podConditionManager.HasConditionWithStatus(pod, k8sv1.PodScheduled, k8sv1.ConditionFalse) {
conditionManager.AddPodCondition(vmiCopy, podConditionManager.GetCondition(pod, k8sv1.PodScheduled))
} else if conditionManager.HasCondition(vmiCopy, virtv1.VirtualMachineInstanceConditionType(k8sv1.PodScheduled)) {
// Remove PodScheduling condition from the VM
conditionManager.RemoveCondition(vmiCopy, virtv1.VirtualMachineInstanceConditionType(k8sv1.PodScheduled))
}
if controller.IsPodFailedOrGoingDown(pod) {
vmiCopy.Status.Phase = virtv1.Failed
}
}
}
if syncErr != nil && syncErr.Reason() == controller.FailedPvcNotFoundReason {
condition := virtv1.VirtualMachineInstanceCondition{
Type: virtv1.VirtualMachineInstanceConditionType(k8sv1.PodScheduled),
Reason: k8sv1.PodReasonUnschedulable,
Message: syncErr.Error(),
Status: k8sv1.ConditionFalse,
}
if conditionManager.HasCondition(vmiCopy, condition.Type) {
conditionManager.RemoveCondition(vmiCopy, condition.Type)
}
vmiCopy.Status.Conditions = append(vmiCopy.Status.Conditions, condition)
}
}
case vmi.IsScheduling():
// Remove InstanceProvisioning condition from the VM
if conditionManager.HasCondition(vmiCopy, virtv1.VirtualMachineInstanceProvisioning) {
conditionManager.RemoveCondition(vmiCopy, virtv1.VirtualMachineInstanceProvisioning)
}
if vmiPodExists {
// ensure that the QOS class on the VMI matches the pod's QOS class
if pod.Status.QOSClass == "" {
vmiCopy.Status.QOSClass = nil
} else {
vmiCopy.Status.QOSClass = &pod.Status.QOSClass
}
// Add PodScheduled False condition to the VM
if podConditionManager.HasConditionWithStatus(pod, k8sv1.PodScheduled, k8sv1.ConditionFalse) {
conditionManager.AddPodCondition(vmiCopy, podConditionManager.GetCondition(pod, k8sv1.PodScheduled))
} else if conditionManager.HasCondition(vmiCopy, virtv1.VirtualMachineInstanceConditionType(k8sv1.PodScheduled)) {
// Remove PodScheduling condition from the VM
conditionManager.RemoveCondition(vmiCopy, virtv1.VirtualMachineInstanceConditionType(k8sv1.PodScheduled))
}
if imageErr := checkForContainerImageError(pod); imageErr != nil {
// only overwrite syncErr if imageErr != nil
syncErr = imageErr
}
if controller.IsPodReady(pod) && vmi.DeletionTimestamp == nil {
// fail vmi creation if CPU pinning has been requested but the Pod QOS is not Guaranteed
podQosClass := pod.Status.QOSClass
if podQosClass != k8sv1.PodQOSGuaranteed && vmi.IsCPUDedicated() {
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, controller.FailedGuaranteePodResourcesReason, "failed to guarantee pod resources")
syncErr = common.NewSyncError(fmt.Errorf("failed to guarantee pod resources"), controller.FailedGuaranteePodResourcesReason)
break
}
// Storage
// Initialize the volume status field with information
// about the PVCs that the VMI is consuming. This prevents
// virt-handler from needing to make API calls to GET the pvc
// during reconcile
if err := c.updateVolumeStatus(vmiCopy, pod); err != nil {
return err
}
// Network
if err := c.updateNetworkStatus(vmiCopy, pod); err != nil {
log.Log.Errorf("failed to update the interface status: %v", err)
}
// vmi is still owned by the controller but pod is already ready,
// so let's hand over the vmi too
vmiCopy.Status.Phase = virtv1.Scheduled
if vmiCopy.Labels == nil {
vmiCopy.Labels = map[string]string{}
}
vmiCopy.ObjectMeta.Labels[virtv1.NodeNameLabel] = pod.Spec.NodeName
vmiCopy.Status.NodeName = pod.Spec.NodeName
// Set the VMI migration transport now before the VMI can be migrated
// This status field is needed to support the migration of legacy virt-launchers
// to newer ones. In an absence of this field on the vmi, the target launcher
// will set up a TCP proxy, as expected by a legacy virt-launcher.
if shouldSetMigrationTransport(pod) {
vmiCopy.Status.MigrationTransport = virtv1.MigrationTransportUnix
}
// Allocate the CID if VSOCK is enabled.
if util.IsAutoAttachVSOCK(vmiCopy) {
if err := c.cidsMap.Allocate(vmiCopy); err != nil {
return err
}
}
} else if controller.IsPodDownOrGoingDown(pod) {
vmiCopy.Status.Phase = virtv1.Failed
}
} else {
log.Log.Object(vmi).V(5).Infof("setting VMI to failed during scheduling because pod does not exist")
// someone other than the controller deleted the pod unexpectedly
vmiCopy.Status.Phase = virtv1.Failed
}
case vmi.IsFinal():
allDeleted, err := c.allPodsDeleted(vmi)
if err != nil {
return err
}
if allDeleted {
log.Log.V(3).Object(vmi).Infof("all pods have been deleted, removing finalizer")
controller.RemoveFinalizer(vmiCopy, virtv1.DeprecatedVirtualMachineInstanceFinalizer)
controller.RemoveFinalizer(vmiCopy, virtv1.VirtualMachineInstanceFinalizer)
if vmiCopy.Labels != nil {
delete(vmiCopy.Labels, virtv1.OutdatedLauncherImageLabel)
}
vmiCopy.Status.LauncherContainerImageVersion = ""
}
if !c.hasOwnerVM(vmi) && len(vmiCopy.Finalizers) > 0 {
// if there's still no owner VM around, remove the VM controller's finalizer if it exists
controller.RemoveFinalizer(vmiCopy, virtv1.VirtualMachineControllerFinalizer)
}
case vmi.IsRunning():
if !vmiPodExists {
log.Log.Object(vmi).V(5).Infof("setting VMI to failed while running because pod does not exist")
vmiCopy.Status.Phase = virtv1.Failed
break
}
if pod.Status.Phase == k8sv1.PodSucceeded && vmi.IsDecentralizedMigration() && vmi.Status.MigrationState != nil && vmi.Status.MigrationState.Completed {
vmiCopy.Status.Phase = virtv1.Succeeded
break
}
// Storage
if err := c.updateVolumeStatus(vmiCopy, pod); err != nil {
return err
}
// Network
if err := c.updateNetworkStatus(vmiCopy, pod); err != nil {
log.Log.Errorf("failed to update the interface status: %v", err)
}
if c.requireCPUHotplug(vmiCopy) {
syncHotplugCondition(vmiCopy, virtv1.VirtualMachineInstanceVCPUChange)
}
if c.requireMemoryHotplug(vmiCopy) {
c.syncMemoryHotplug(vmiCopy)
}
if c.requireVolumesUpdate(vmiCopy) {
c.syncVolumesUpdate(vmiCopy)
}
c.syncMigrationRequiredCondition(vmiCopy)
c.checkEphemeralHotplugVolumes(vmiCopy)
case vmi.IsScheduled():
if !vmiPodExists {
if vmiCopy.IsDecentralizedMigration() && vmiCopy.IsMigrationTarget() {
log.Log.Object(vmi).V(2).Infof("setting VMI to WaitingForSync while scheduled because pod does not exist")
vmiCopy.Status.Phase = virtv1.WaitingForSync
if vmiCopy.Status.MigrationState != nil {
vmiCopy.Status.MigrationState.Failed = true
vmiCopy.Status.MigrationState.Completed = true
vmiCopy.Status.MigrationState.EndTimestamp = pointer.P(metav1.Now())
}
break
}
log.Log.Object(vmi).V(5).Infof("setting VMI to failed while scheduled because pod does not exist")
vmiCopy.Status.Phase = virtv1.Failed
break
}
if err := c.updateVolumeStatus(vmiCopy, pod); err != nil {
return err
}
case vmi.IsWaitingForSync():
if vmi.DeletionTimestamp != nil {
// Deleted VMI while waiting for sync, remove finalizers.
log.Log.Object(vmi).V(1).Infof("deleting VMI while waiting for sync, removing finalizers")
controller.RemoveFinalizer(vmiCopy, virtv1.DeprecatedVirtualMachineInstanceFinalizer)
controller.RemoveFinalizer(vmiCopy, virtv1.VirtualMachineInstanceFinalizer)
if !c.hasOwnerVM(vmi) && len(vmiCopy.Finalizers) > 0 {
// if there's still no owner VM around, remove the VM controller's finalizer if it exists
controller.RemoveFinalizer(vmiCopy, virtv1.VirtualMachineControllerFinalizer)
}
} else if vmiPodExists {
vmiCopy.Status.Phase = virtv1.Scheduling
}
default:
return fmt.Errorf("unknown vmi phase %v", vmi.Status.Phase)
}
if vmiCopy.IsMarkedForEviction() {
if !conditionManager.HasConditionWithStatus(vmiCopy, virtv1.VirtualMachineInstanceEvictionRequested, k8sv1.ConditionTrue) {
now := v1.Now()
conditionManager.UpdateCondition(vmiCopy, &virtv1.VirtualMachineInstanceCondition{
Type: virtv1.VirtualMachineInstanceEvictionRequested,
Status: k8sv1.ConditionTrue,
Reason: virtv1.VirtualMachineInstanceReasonEvictionRequested,
Message: "VMI is marked for eviction",
LastProbeTime: now,
LastTransitionTime: now,
})
}
} else {
conditionManager.RemoveCondition(vmiCopy, virtv1.VirtualMachineInstanceEvictionRequested)
}
// VMI is owned by virt-handler, so patch instead of update
if vmi.IsRunning() || vmi.IsScheduled() {
patchSet := prepareVMIPatch(vmi, vmiCopy)
if patchSet.IsEmpty() {
return nil
}
patchBytes, err := patchSet.GeneratePayload()
if err != nil {
return fmt.Errorf("error preparing VMI patch: %v", err)
}
log.Log.Object(vmi).V(5).Infof("patching VMI: %s", string(patchBytes))
_, err = c.clientset.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType, patchBytes, v1.PatchOptions{})
// Ideally we would not retry when the "test" operation fails, but we have no sane way
// to detect that right now (see https://github.com/kubernetes/kubernetes/issues/68202 for details),
// so just retry as with any other error
if err != nil {
return fmt.Errorf("patching of vmi conditions and activePods failed: %v", err)
}
return nil
}
reason := ""
if syncErr != nil {
reason = syncErr.Reason()
}
conditionManager.CheckFailure(vmiCopy, syncErr, reason)
controller.SetVMIPhaseTransitionTimestamp(&vmi.Status, &vmiCopy.Status)
// If we detect a change on the vmi we update the vmi
vmiChanged := !equality.Semantic.DeepEqual(vmi.Status, vmiCopy.Status) || !equality.Semantic.DeepEqual(vmi.Finalizers, vmiCopy.Finalizers) || !equality.Semantic.DeepEqual(vmi.Annotations, vmiCopy.Annotations) || !equality.Semantic.DeepEqual(vmi.Labels, vmiCopy.Labels)
if vmiChanged {
c.vmiExpectations.SetExpectations(key, 1, 0)
_, err = c.clientset.VirtualMachineInstance(vmi.Namespace).Update(context.Background(), vmiCopy, v1.UpdateOptions{})
if err != nil {
c.vmiExpectations.SetExpectations(key, 0, 0)
return err
}
}
return nil
}
// prepareVMIPatch generates a patch set for updating the VMI status.
func prepareVMIPatch(oldVMI, newVMI *virtv1.VirtualMachineInstance) *patch.PatchSet {
patchSet := patch.New()
// TODO(vladikr): Move to storage
if !equality.Semantic.DeepEqual(newVMI.Status.VolumeStatus, oldVMI.Status.VolumeStatus) {
// VolumeStatus changed, which means volumes were either added or removed.
if oldVMI.Status.VolumeStatus == nil {
patchSet.AddOption(patch.WithAdd("/status/volumeStatus", newVMI.Status.VolumeStatus))
} else {
patchSet.AddOption(
patch.WithTest("/status/volumeStatus", oldVMI.Status.VolumeStatus),
patch.WithReplace("/status/volumeStatus", newVMI.Status.VolumeStatus),
)
}
log.Log.V(3).Object(oldVMI).Infof("Patching Volume Status")
}
// We don't own the object anymore, so patch instead of update
vmiConditions := controller.NewVirtualMachineInstanceConditionManager()
if !vmiConditions.ConditionsEqual(oldVMI, newVMI) {
patchSet.AddOption(
patch.WithTest("/status/conditions", oldVMI.Status.Conditions),
patch.WithReplace("/status/conditions", newVMI.Status.Conditions),
)
log.Log.V(3).Object(oldVMI).Infof("Patching VMI conditions")
}
if !equality.Semantic.DeepEqual(newVMI.Status.ActivePods, oldVMI.Status.ActivePods) {
patchSet.AddOption(
patch.WithTest("/status/activePods", oldVMI.Status.ActivePods),
patch.WithReplace("/status/activePods", newVMI.Status.ActivePods),
)
log.Log.V(3).Object(oldVMI).Infof("Patching VMI activePods")
}
if newVMI.Status.Phase != oldVMI.Status.Phase {
patchSet.AddOption(
patch.WithTest("/status/phase", oldVMI.Status.Phase),
patch.WithReplace("/status/phase", newVMI.Status.Phase),
)
log.Log.V(3).Object(oldVMI).Infof("Patching VMI phase")
}
if newVMI.Status.LauncherContainerImageVersion != oldVMI.Status.LauncherContainerImageVersion {
if oldVMI.Status.LauncherContainerImageVersion == "" {
patchSet.AddOption(patch.WithAdd("/status/launcherContainerImageVersion", newVMI.Status.LauncherContainerImageVersion))
} else {
patchSet.AddOption(
patch.WithTest("/status/launcherContainerImageVersion", oldVMI.Status.LauncherContainerImageVersion),
patch.WithReplace("/status/launcherContainerImageVersion", newVMI.Status.LauncherContainerImageVersion),
)
}
}
if !equality.Semantic.DeepEqual(oldVMI.Labels, newVMI.Labels) {
if oldVMI.Labels == nil {
patchSet.AddOption(patch.WithAdd("/metadata/labels", newVMI.Labels))
} else {
patchSet.AddOption(
patch.WithTest("/metadata/labels", oldVMI.Labels),
patch.WithReplace("/metadata/labels", newVMI.Labels),
)
}
}
if !equality.Semantic.DeepEqual(oldVMI.Annotations, newVMI.Annotations) {
if oldVMI.Annotations == nil {
patchSet.AddOption(patch.WithAdd("/metadata/annotations", newVMI.Annotations))
} else {
patchSet.AddOption(
patch.WithTest("/metadata/annotations", oldVMI.Annotations),
patch.WithReplace("/metadata/annotations", newVMI.Annotations),
)
}
}
// Sort network interfaces by name to ensure that the order does not affect the equality check.
// Prior to this an API patch flood would occur - see: https://github.com/kubevirt/kubevirt/issues/14442
cmpFunc := func(a, b virtv1.VirtualMachineInstanceNetworkInterface) int {
return strings.Compare(a.Name, b.Name)
}
newInterfaces := slices.SortedFunc(slices.Values(newVMI.Status.Interfaces), cmpFunc)
oldInterfaces := slices.SortedFunc(slices.Values(oldVMI.Status.Interfaces), cmpFunc)
// TODO(vladikr): Move to networking
if !equality.Semantic.DeepEqual(newInterfaces, oldInterfaces) {
patchSet.AddOption(
patch.WithTest("/status/interfaces", oldVMI.Status.Interfaces),
patch.WithAdd("/status/interfaces", newVMI.Status.Interfaces),
)
log.Log.V(3).Object(oldVMI).Infof("Patching Interface Status")
}
return patchSet
}
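// For illustration only: a test+replace pair such as the phase patch above
// serializes into a JSON patch whose "test" operation guards against
// concurrent writers, e.g. (assuming GeneratePayload behaves as in
// pkg/apimachinery/patch):
//
// [{"op":"test","path":"/status/phase","value":"Scheduled"},
//  {"op":"replace","path":"/status/phase","value":"Running"}]
func examplePhasePatchPayload() ([]byte, error) {
return patch.New(
patch.WithTest("/status/phase", virtv1.Scheduled),
patch.WithReplace("/status/phase", virtv1.Running),
).GeneratePayload()
}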
// These "dynamic" annotations/labels are Pod annotations/labels which may diverge from the VMI over time that we want to keep in sync.
func (c *Controller) syncDynamicAnnotationsAndLabelsToPod(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod) (*k8sv1.Pod, error) {
patchSet := patch.New()
newPodAnnotations := maps.Clone(pod.Annotations)
newPodLabels := maps.Clone(pod.Labels)
syncMap := func(keys []string, vmiMap, podNewMap, podOrigMap map[string]string, subPath string) {
if podNewMap == nil {
podNewMap = map[string]string{}
}
changed := false
for _, key := range keys {
vmiVal, vmiExists := vmiMap[key]
podVal, podExists := podNewMap[key]
if vmiExists == podExists && vmiVal == podVal {
continue
}
changed = true
if !vmiExists {
delete(podNewMap, key)
} else {
podNewMap[key] = vmiVal
}
}
if !changed {
return
}
if podOrigMap == nil {
patchSet.AddOption(patch.WithAdd("/metadata/"+subPath, podNewMap))
} else {
patchSet.AddOption(
patch.WithTest("/metadata/"+subPath, podOrigMap),
patch.WithReplace("/metadata/"+subPath, podNewMap),
)
}
}
dynamicLabels := []string{virtv1.NodeNameLabel, virtv1.OutdatedLauncherImageLabel}
dynamicLabels = append(dynamicLabels, c.additionalLauncherLabelsSync...)
dynamicAnnotations := []string{descheduler.EvictPodAnnotationKeyAlpha, descheduler.EvictPodAnnotationKeyAlphaPreferNoEviction}
dynamicAnnotations = append(dynamicAnnotations, c.additionalLauncherAnnotationsSync...)
syncMap(
dynamicLabels,
vmi.Labels, newPodLabels, pod.ObjectMeta.Labels, "labels",
)
syncMap(
dynamicAnnotations,
vmi.Annotations, newPodAnnotations, pod.ObjectMeta.Annotations, "annotations",
)
if patchSet.IsEmpty() {
return pod, nil
}
patchBytes, err := patchSet.GeneratePayload()
if err != nil {
return pod, err
}
updatedPod, err := c.clientset.CoreV1().Pods(pod.Namespace).Patch(
context.Background(), pod.Name, types.JSONPatchType, patchBytes, v1.PatchOptions{},
)
if err != nil {
log.Log.Object(pod).Errorf("failed to sync dynamic pod annotations and labels during sync: %v", err)
return pod, err
}
return updatedPod, nil
}
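// exampleSyncDynamicKeys is a minimal standalone sketch of the "tracked keys
// only" merge performed by syncMap above, with hypothetical inputs: only the
// listed dynamic keys are copied from (or deleted along with) the VMI, and
// every other pod label is left untouched.
func exampleSyncDynamicKeys() map[string]string {
vmiLabels := map[string]string{virtv1.NodeNameLabel: "node-1"} // hypothetical input
podLabels := map[string]string{"app": "virt-launcher"}         // hypothetical input
merged := maps.Clone(podLabels)
for _, key := range []string{virtv1.NodeNameLabel} {
if v, ok := vmiLabels[key]; ok {
merged[key] = v // tracked key present on the VMI: copy it to the pod
} else {
delete(merged, key) // tracked key removed from the VMI: remove it from the pod
}
}
return merged
}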
func (c *Controller) setLauncherContainerInfo(vmi *virtv1.VirtualMachineInstance, curPodImage string) *virtv1.VirtualMachineInstance {
if curPodImage != "" && curPodImage != c.templateService.GetLauncherImage() {
if vmi.Labels == nil {
vmi.Labels = map[string]string{}
}
vmi.Labels[virtv1.OutdatedLauncherImageLabel] = ""
} else {
if vmi.Labels != nil {
delete(vmi.Labels, virtv1.OutdatedLauncherImageLabel)
}
}
vmi.Status.LauncherContainerImageVersion = curPodImage
return vmi
}
func (c *Controller) hasOwnerVM(vmi *virtv1.VirtualMachineInstance) bool {
controllerRef := v1.GetControllerOf(vmi)
if controllerRef == nil || controllerRef.Kind != virtv1.VirtualMachineGroupVersionKind.Kind {
return false
}
obj, exists, _ := c.vmStore.GetByKey(controller.NamespacedKey(vmi.Namespace, controllerRef.Name))
if !exists {
return false
}
ownerVM := obj.(*virtv1.VirtualMachine)
return controllerRef.UID == ownerVM.UID
}
func (c *Controller) syncReadyConditionFromPod(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod) {
vmiConditions := controller.NewVirtualMachineInstanceConditionManager()
podConditions := controller.NewPodConditionManager()
now := v1.Now()
if pod == nil || isTempPod(pod) {
vmiConditions.UpdateCondition(vmi, &virtv1.VirtualMachineInstanceCondition{
Type: virtv1.VirtualMachineInstanceReady,
Status: k8sv1.ConditionFalse,
Reason: virtv1.PodNotExistsReason,
Message: "virt-launcher pod has not yet been scheduled",
LastProbeTime: now,
LastTransitionTime: now,
})
} else if controller.IsPodDownOrGoingDown(pod) {
vmiConditions.UpdateCondition(vmi, &virtv1.VirtualMachineInstanceCondition{
Type: virtv1.VirtualMachineInstanceReady,
Status: k8sv1.ConditionFalse,
Reason: virtv1.PodTerminatingReason,
Message: "virt-launcher pod is terminating",
LastProbeTime: now,
LastTransitionTime: now,
})
} else if !vmi.IsRunning() {
vmiConditions.UpdateCondition(vmi, &virtv1.VirtualMachineInstanceCondition{
Type: virtv1.VirtualMachineInstanceReady,
Status: k8sv1.ConditionFalse,
Reason: virtv1.GuestNotRunningReason,
Message: "Guest VM is not reported as running",
LastProbeTime: now,
LastTransitionTime: now,
})
} else if podReadyCond := podConditions.GetCondition(pod, k8sv1.PodReady); podReadyCond != nil {
vmiConditions.UpdateCondition(vmi, &virtv1.VirtualMachineInstanceCondition{
Type: virtv1.VirtualMachineInstanceReady,
Status: podReadyCond.Status,
Reason: podReadyCond.Reason,
Message: podReadyCond.Message,
LastProbeTime: podReadyCond.LastProbeTime,
LastTransitionTime: podReadyCond.LastTransitionTime,
})
} else {
vmiConditions.UpdateCondition(vmi, &virtv1.VirtualMachineInstanceCondition{
Type: virtv1.VirtualMachineInstanceReady,
Status: k8sv1.ConditionFalse,
Reason: virtv1.PodConditionMissingReason,
Message: "virt-launcher pod is missing the Ready condition",
LastProbeTime: now,
LastTransitionTime: now,
})
}
}
func (c *Controller) syncPausedConditionToPod(vmi *virtv1.VirtualMachineInstance, originalPod *k8sv1.Pod) error {
vmiConditions := controller.NewVirtualMachineInstanceConditionManager()
podConditions := controller.NewPodConditionManager()
newPod := originalPod.DeepCopy()
now := v1.Now()
if vmiConditions.HasConditionWithStatus(vmi, virtv1.VirtualMachineInstancePaused, k8sv1.ConditionTrue) {
if podConditions.HasConditionWithStatus(originalPod, virtv1.VirtualMachineUnpaused, k8sv1.ConditionTrue) {
podConditions.UpdateCondition(newPod, &k8sv1.PodCondition{
Type: virtv1.VirtualMachineUnpaused,
Status: k8sv1.ConditionFalse,
Reason: "Paused",
Message: "the virtual machine is paused",
LastProbeTime: now,
LastTransitionTime: now,
})
}
} else {
if !podConditions.HasConditionWithStatus(originalPod, virtv1.VirtualMachineUnpaused, k8sv1.ConditionTrue) {
podConditions.UpdateCondition(newPod, &k8sv1.PodCondition{
Type: virtv1.VirtualMachineUnpaused,
Status: k8sv1.ConditionTrue,
Reason: "NotPaused",
Message: "the virtual machine is not paused",
LastProbeTime: now,
LastTransitionTime: now,
})
}
}
if podConditions.ConditionsEqual(originalPod, newPod) {
return nil
}
originalBytes, err := json.Marshal(originalPod)
if err != nil {
return fmt.Errorf("could not serialize original object: %v", err)
}
modifiedBytes, err := json.Marshal(newPod)
if err != nil {
return fmt.Errorf("could not serialize modified object: %v", err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalBytes, modifiedBytes, k8sv1.Pod{})
if err != nil {
return fmt.Errorf("error preparing pod patch: %v", err)
}
log.Log.V(3).Object(originalPod).Infof("Patching pod conditions")
_, err = c.clientset.CoreV1().Pods(originalPod.Namespace).Patch(context.TODO(), originalPod.Name, types.StrategicMergePatchType, patchBytes, v1.PatchOptions{}, "status")
// Ideally we would not retry when the "test" operation fails, but we have no sane way
// to detect that right now (see https://github.com/kubernetes/kubernetes/issues/68202 for details),
// so just retry as with any other error
if err != nil {
log.Log.Object(originalPod).Errorf("Patching of pod conditions failed: %v", err)
return fmt.Errorf("patching of pod conditions failed: %v", err)
}
return nil
}
// checkForContainerImageError checks if an error has occurred while handling the image of any of the pod's containers
// (including init containers), and returns a syncErr with the details of the error, or nil otherwise.
func checkForContainerImageError(pod *k8sv1.Pod) common.SyncError {
containerStatuses := append(append([]k8sv1.ContainerStatus{}, pod.Status.InitContainerStatuses...), pod.Status.ContainerStatuses...)
for _, containerStatus := range containerStatuses {
if containerStatus.State.Waiting == nil {
continue
}
reason := containerStatus.State.Waiting.Reason
if reason == controller.ErrImagePullReason || reason == controller.ImagePullBackOffReason {
return common.NewSyncError(fmt.Errorf("%s", containerStatus.State.Waiting.Message), reason)
}
}
return nil
}
func (c *Controller) deleteAllMatchingPods(vmi *virtv1.VirtualMachineInstance) error {
pods, err := c.listPodsFromNamespace(vmi.Namespace)
if err != nil {
return err
}
vmiKey := controller.VirtualMachineInstanceKey(vmi)
for _, pod := range pods {
if (pod.DeletionTimestamp != nil && !isPodFinal(pod)) || !v1.IsControlledBy(pod, vmi) {
continue
}
if err = c.deletePod(vmiKey, pod, v1.DeleteOptions{}); err != nil {
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, controller.FailedDeletePodReason, "Failed to delete virtual machine pod %s", pod.Name)
return err
}
c.recorder.Eventf(vmi, k8sv1.EventTypeNormal, controller.SuccessfulDeletePodReason, "Deleted virtual machine pod %s", pod.Name)
}
return nil
}
func isPodFinal(pod *k8sv1.Pod) bool {
return pod.Status.Phase == k8sv1.PodSucceeded || pod.Status.Phase == k8sv1.PodFailed
}
func (c *Controller) listPodsFromNamespace(namespace string) ([]*k8sv1.Pod, error) {
objs, err := c.podIndexer.ByIndex(cache.NamespaceIndex, namespace)
if err != nil {
return nil, err
}
pods := []*k8sv1.Pod{}
for _, obj := range objs {
pod := obj.(*k8sv1.Pod)
pods = append(pods, pod)
}
return pods, nil
}
func (c *Controller) setActivePods(vmi *virtv1.VirtualMachineInstance) (*virtv1.VirtualMachineInstance, error) {
pods, err := c.listPodsFromNamespace(vmi.Namespace)
if err != nil {
return nil, err
}
activePods := make(map[types.UID]string)
count := 0
for _, pod := range pods {
if !v1.IsControlledBy(pod, vmi) {
continue
}
count++
activePods[pod.UID] = pod.Spec.NodeName
}
if count == 0 && vmi.Status.ActivePods == nil {
return vmi, nil
}
vmi.Status.ActivePods = activePods
return vmi, nil
}
func (c *Controller) allPodsDeleted(vmi *virtv1.VirtualMachineInstance) (bool, error) {
pods, err := c.listPodsFromNamespace(vmi.Namespace)
if err != nil {
return false, err
}
for _, pod := range pods {
if v1.IsControlledBy(pod, vmi) {
return false, nil
}
}
return true, nil
}
func (c *Controller) deletePod(vmiKey string, pod *k8sv1.Pod, options v1.DeleteOptions) error {
c.podExpectations.ExpectDeletions(vmiKey, []string{controller.PodKey(pod)})
err := c.clientset.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, options)
if err != nil {
c.podExpectations.DeletionObserved(vmiKey, controller.PodKey(pod))
if k8serrors.IsNotFound(err) {
return nil
}
}
return err
}
func (c *Controller) createPod(key, namespace string, pod *k8sv1.Pod) (*k8sv1.Pod, error) {
c.podExpectations.ExpectCreations(key, 1)
pod, err := c.clientset.CoreV1().Pods(namespace).Create(context.Background(), pod, v1.CreateOptions{})
if err != nil {
c.podExpectations.CreationObserved(key)
}
return pod, err
}
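// The two helpers above follow the expectations pattern used throughout these
// controllers: declare the intended creation or deletion *before* issuing the
// API call, and roll the counter back if the call fails (or the object is
// already gone), so the informer-driven sync loop never acts on a cache that
// is known to be stale. A minimal sketch of the consuming side, assuming
// podExpectations exposes the same SatisfiedExpectations(key) used for
// pvcExpectations earlier in this file:
func (c *Controller) examplePodExpectationsSettled(vmiKey string) bool {
// True only once every declared creation/deletion has been observed by the
// informer; until then the caller should back off and re-queue the key.
return c.podExpectations.SatisfiedExpectations(vmiKey)
}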
func isTempPod(pod *k8sv1.Pod) bool {
_, ok := pod.Annotations[virtv1.EphemeralProvisioningObject]
return ok
}
func shouldSetMigrationTransport(pod *k8sv1.Pod) bool {
_, ok := pod.Annotations[virtv1.MigrationTransportUnixAnnotation]
return ok
}
func (c *Controller) cleanupWaitForFirstConsumerTemporaryPods(vmi *virtv1.VirtualMachineInstance, virtLauncherPod *k8sv1.Pod) error {
triggerPods, err := c.waitForFirstConsumerTemporaryPods(vmi, virtLauncherPod)
if err != nil {
return err
}
return c.deleteRunningOrFinishedWFFCPods(vmi, triggerPods...)
}
func (c *Controller) deleteRunningOrFinishedWFFCPods(vmi *virtv1.VirtualMachineInstance, pods ...*k8sv1.Pod) error {
for _, pod := range pods {
err := c.deleteRunningFinishedOrFailedPod(vmi, pod)
if err != nil && !k8serrors.IsNotFound(err) {
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, controller.FailedDeletePodReason, "Failed to delete WaitForFirstConsumer temporary pod %s", pod.Name)
return err
}
c.recorder.Eventf(vmi, k8sv1.EventTypeNormal, controller.SuccessfulDeletePodReason, "Deleted WaitForFirstConsumer temporary pod %s", pod.Name)
}
return nil
}
func (c *Controller) deleteRunningFinishedOrFailedPod(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod) error {
zero := int64(0)
if pod.Status.Phase == k8sv1.PodRunning || pod.Status.Phase == k8sv1.PodSucceeded || pod.Status.Phase == k8sv1.PodFailed {
vmiKey := controller.VirtualMachineInstanceKey(vmi)
return c.deletePod(vmiKey, pod, v1.DeleteOptions{GracePeriodSeconds: &zero})
}
return nil
}
func (c *Controller) waitForFirstConsumerTemporaryPods(vmi *virtv1.VirtualMachineInstance, virtLauncherPod *k8sv1.Pod) ([]*k8sv1.Pod, error) {
var temporaryPods []*k8sv1.Pod
// Get all pods from the namespace
pods, err := c.listPodsFromNamespace(vmi.Namespace)
if err != nil {
return temporaryPods, err
}
for _, pod := range pods {
// Cleanup candidates are temporary pods that are either controlled by the VMI or the virt launcher pod
if !isTempPod(pod) {
continue
}
if v1.IsControlledBy(pod, vmi) {
temporaryPods = append(temporaryPods, pod)
}
if v1.IsControlledBy(pod, virtLauncherPod) {
temporaryPods = append(temporaryPods, pod)
}
}
return temporaryPods, nil
}
func (c *Controller) requireCPUHotplug(vmi *virtv1.VirtualMachineInstance) bool {
if vmi.Status.CurrentCPUTopology == nil || vmi.Spec.Domain.CPU == nil || vmi.Spec.Domain.CPU.MaxSockets == 0 {
return false
}
cpuTopologyFromStatus := &virtv1.CPU{
Cores: vmi.Status.CurrentCPUTopology.Cores,
Sockets: vmi.Status.CurrentCPUTopology.Sockets,
Threads: vmi.Status.CurrentCPUTopology.Threads,
}
return hardware.GetNumberOfVCPUs(vmi.Spec.Domain.CPU) != hardware.GetNumberOfVCPUs(cpuTopologyFromStatus)
}
func (c *Controller) requireMemoryHotplug(vmi *virtv1.VirtualMachineInstance) bool {
if vmi.Status.Memory == nil || vmi.Spec.Domain.Memory == nil || vmi.Spec.Domain.Memory.Guest == nil || vmi.Spec.Domain.Memory.MaxGuest == nil {
return false
}
return vmi.Spec.Domain.Memory.Guest.Value() != vmi.Status.Memory.GuestRequested.Value()
}
func (c *Controller) syncMemoryHotplug(vmi *virtv1.VirtualMachineInstance) {
syncHotplugCondition(vmi, virtv1.VirtualMachineInstanceMemoryChange)
// store additionalGuestMemoryOverheadRatio
overheadRatio := c.clusterConfig.GetConfig().AdditionalGuestMemoryOverheadRatio
if overheadRatio != nil {
if vmi.Labels == nil {
vmi.Labels = map[string]string{}
}
vmi.Labels[virtv1.MemoryHotplugOverheadRatioLabel] = *overheadRatio
}
}
func (c *Controller) syncMigrationRequiredCondition(vmi *virtv1.VirtualMachineInstance) {
const pendingMigrationReEvalPeriod = 10 * time.Second
if migrations.IsMigrating(vmi) {
return
}
result := c.netMigrationEvaluator.Evaluate(vmi)
cm := controller.NewVirtualMachineInstanceConditionManager()
existingCondition := cm.GetCondition(vmi, virtv1.VirtualMachineInstanceMigrationRequired)
switch {
case result == k8sv1.ConditionUnknown && existingCondition == nil:
return
case result == k8sv1.ConditionUnknown && existingCondition != nil:
cm.RemoveCondition(vmi, virtv1.VirtualMachineInstanceMigrationRequired)
return
}
if existingCondition != nil && existingCondition.Status == k8sv1.ConditionTrue {
return
}
cm.UpdateCondition(vmi, newMigrationRequiredCondition(result))
if result == k8sv1.ConditionTrue {
return
}
// Re-enqueue the VMI to make sure it is handled again after at most pendingMigrationReEvalPeriod.
// This covers the scenario where the status was set to `False` and none of the objects
// the VMI controller watches have changed since.
key, _ := controller.KeyFunc(vmi)
c.Queue.AddAfter(key, pendingMigrationReEvalPeriod)
}
func newMigrationRequiredCondition(status k8sv1.ConditionStatus) *virtv1.VirtualMachineInstanceCondition {
reason := virtv1.VirtualMachineInstanceReasonAutoMigrationDueToLiveUpdate
if status == k8sv1.ConditionFalse {
reason = virtv1.VirtualMachineInstanceReasonAutoMigrationPending
}
return &virtv1.VirtualMachineInstanceCondition{
Type: virtv1.VirtualMachineInstanceMigrationRequired,
Status: status,
LastProbeTime: v1.Time{},
LastTransitionTime: v1.Now(),
Reason: reason,
Message: "",
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package vmi
import (
"encoding/json"
"fmt"
"sort"
"strings"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
virtv1 "kubevirt.io/api/core/v1"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/controller"
backendstorage "kubevirt.io/kubevirt/pkg/storage/backend-storage"
storagetypes "kubevirt.io/kubevirt/pkg/storage/types"
"kubevirt.io/kubevirt/pkg/virt-controller/watch/common"
)
// addPVC handles the addition of a PVC, enqueuing affected VMIs.
func (c *Controller) addPVC(obj interface{}) {
pvc := obj.(*k8sv1.PersistentVolumeClaim)
if pvc.DeletionTimestamp != nil {
return
}
persistentStateFor, exists := pvc.Labels[backendstorage.PVCPrefix]
if exists {
vmiKey := controller.NamespacedKey(pvc.Namespace, persistentStateFor)
c.pvcExpectations.CreationObserved(vmiKey)
c.Queue.Add(vmiKey)
return // The PVC is a backend-storage PVC, won't be listed by `c.listVMIsMatchingDV()`
}
vmis, err := c.listVMIsMatchingDV(pvc.Namespace, pvc.Name)
if err != nil {
return
}
for _, vmi := range vmis {
log.Log.V(4).Object(pvc).Infof("PVC created for vmi %s", vmi.Name)
c.enqueueVirtualMachine(vmi)
}
}
// updatePVC handles updates to a PVC, enqueuing affected VMIs if capacity or requested size changes.
func (c *Controller) updatePVC(old, cur interface{}) {
curPVC := cur.(*k8sv1.PersistentVolumeClaim)
oldPVC := old.(*k8sv1.PersistentVolumeClaim)
if curPVC.ResourceVersion == oldPVC.ResourceVersion {
// Periodic resync will send update events for all known PVCs.
// Two different versions of the same PVC will always
// have different RVs.
return
}
if curPVC.DeletionTimestamp != nil {
return
}
if equality.Semantic.DeepEqual(curPVC.Status.Capacity, oldPVC.Status.Capacity) &&
equality.Semantic.DeepEqual(curPVC.Spec.Resources.Requests, oldPVC.Spec.Resources.Requests) {
// We only do something when the capacity or the requested size changes.
return
}
vmis, err := c.listVMIsMatchingDV(curPVC.Namespace, curPVC.Name)
if err != nil {
log.Log.Object(curPVC).Errorf("Error encountered getting VMIs for DataVolume: %v", err)
return
}
for _, vmi := range vmis {
log.Log.V(4).Object(curPVC).Infof("PVC updated for vmi %s", vmi.Name)
c.enqueueVirtualMachine(vmi)
}
}
// updateVM handles updates to a VM, enqueuing affected VMI only when VM's volumes update.
// NOTE: this is temporary, to support ephemeral hotplug volume metrics, and
// will be removed once the DeclarativeHotplugVolumes feature gate is enabled by default
func (c *Controller) updateVM(prev, curr interface{}) {
currVM := curr.(*virtv1.VirtualMachine)
prevVM := prev.(*virtv1.VirtualMachine)
if currVM.ResourceVersion == prevVM.ResourceVersion {
return
}
// only requeue VMI if VM's volumes have changed
if !equality.Semantic.DeepEqual(currVM.Spec.Template.Spec.Volumes, prevVM.Spec.Template.Spec.Volumes) {
vmiKey := controller.NamespacedKey(currVM.Namespace, currVM.Name)
obj, exists, err := c.vmiIndexer.GetByKey(vmiKey)
if err != nil || !exists {
return
}
vmi := obj.(*virtv1.VirtualMachineInstance)
controllerRef := v1.GetControllerOf(vmi)
if controllerRef != nil && controllerRef.UID == currVM.UID {
log.Log.V(4).Object(currVM).Infof("VM volumes updated for vmi %s", vmi.Name)
c.enqueueVirtualMachine(vmi)
}
}
}
// listVMIsMatchingDV finds all VMIs referencing a given DataVolume or PVC name.
func (c *Controller) listVMIsMatchingDV(namespace, dvName string) ([]*virtv1.VirtualMachineInstance, error) {
// TODO - refactor if/when dv/pvc do not have the same name
vmis := []*virtv1.VirtualMachineInstance{}
for _, indexName := range []string{"dv", "pvc"} {
objs, err := c.vmiIndexer.ByIndex(indexName, namespace+"/"+dvName)
if err != nil {
return nil, err
}
for _, obj := range objs {
vmi := obj.(*virtv1.VirtualMachineInstance)
vmis = append(vmis, vmi.DeepCopy())
}
}
return vmis, nil
}
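// exampleDVIndexFunc is a hypothetical sketch (the controller's actual index
// registration lives elsewhere) of the kind of index function the "dv"
// lookups above rely on: it maps each VMI to the namespaced names of the
// DataVolumes its volumes reference.
func exampleDVIndexFunc(obj interface{}) ([]string, error) {
vmi, ok := obj.(*virtv1.VirtualMachineInstance)
if !ok {
return nil, nil
}
var keys []string
for _, volume := range vmi.Spec.Volumes {
if volume.VolumeSource.DataVolume != nil {
keys = append(keys, vmi.Namespace+"/"+volume.VolumeSource.DataVolume.Name)
}
}
return keys, nil
}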
// handleBackendStorage manages backend storage PVC creation for the VMI.
func (c *Controller) handleBackendStorage(vmi *virtv1.VirtualMachineInstance) (string, common.SyncError) {
key, err := controller.KeyFunc(vmi)
if err != nil {
return "", common.NewSyncError(err, controller.FailedBackendStorageCreateReason)
}
if !backendstorage.IsBackendStorageNeeded(vmi) {
pvc := backendstorage.PVCForVMI(c.pvcIndexer, vmi)
if pvc != nil {
if err = c.backendStorage.DeletePVCForVMI(vmi, pvc.Name); err != nil {
return "", common.NewSyncError(err, "Failed deleting backend storage")
}
}
return "", nil
}
pvc := backendstorage.PVCForVMI(c.pvcIndexer, vmi)
if pvc == nil {
c.pvcExpectations.ExpectCreations(key, 1)
if pvc, err = c.backendStorage.CreatePVCForVMI(vmi); err != nil {
c.pvcExpectations.CreationObserved(key)
return "", common.NewSyncError(err, controller.FailedBackendStorageCreateReason)
}
}
return pvc.Name, nil
}
func (c *Controller) processHotplugVolumeStatus(
vmi *virtv1.VirtualMachineInstance,
volumeName string,
pvcName string,
status *virtv1.VolumeStatus,
attachmentPod *k8sv1.Pod,
) {
statusCopy := status.DeepCopy()
if statusCopy.HotplugVolume == nil {
statusCopy.HotplugVolume = &virtv1.HotplugVolumeStatus{}
}
if attachmentPod == nil {
if !c.volumeReady(statusCopy.Phase) {
statusCopy.HotplugVolume.AttachPodUID = ""
// The volume is not hotplugged into the VM and the pod is gone (or hasn't been created yet); check the PVC associated with the volume to set the phase, reason and message
phase, reason, message := c.getVolumePhaseMessageReason(pvcName, vmi.Namespace)
statusCopy.Phase = phase
log.Log.V(3).Infof("Setting phase %s for volume %s", phase, volumeName)
statusCopy.Message = message
statusCopy.Reason = reason
}
} else {
statusCopy.HotplugVolume.AttachPodName = attachmentPod.Name
if len(attachmentPod.Status.ContainerStatuses) == 1 && attachmentPod.Status.ContainerStatuses[0].Ready {
statusCopy.HotplugVolume.AttachPodUID = attachmentPod.UID
} else {
// Remove UID of old pod if a new one is available, but not yet ready
statusCopy.HotplugVolume.AttachPodUID = ""
}
if canMoveToAttachedPhase(statusCopy.Phase) {
statusCopy.Phase = virtv1.HotplugVolumeAttachedToNode
log.Log.V(3).Infof("Setting phase %s for volume %s", statusCopy.Phase, volumeName)
statusCopy.Message = fmt.Sprintf("Created hotplug attachment pod %s, for volume %s", attachmentPod.Name, volumeName)
statusCopy.Reason = controller.SuccessfulCreatePodReason
c.recorder.Eventf(vmi, k8sv1.EventTypeNormal, statusCopy.Reason, statusCopy.Message)
}
}
*status = *statusCopy
}
func (c *Controller) processPVCInfo(status *virtv1.VolumeStatus, pvcName string, namespace string, isUtilityVolume bool) error {
statusCopy := status.DeepCopy()
pvcInterface, pvcExists, _ := c.pvcIndexer.GetByKey(fmt.Sprintf("%s/%s", namespace, pvcName))
if pvcExists {
pvc := pvcInterface.(*k8sv1.PersistentVolumeClaim)
if isUtilityVolume && storagetypes.IsPVCBlock(pvc.Spec.VolumeMode) {
statusCopy.Phase = virtv1.VolumePending
statusCopy.Reason = controller.PVCNotReadyReason
statusCopy.Message = fmt.Sprintf("Utility volume PVC %s must be filesystem mode, not block mode", pvcName)
log.Log.Errorf("Utility volume %s references block mode PVC %s, but utility volumes require filesystem mode", statusCopy.Name, pvcName)
*status = *statusCopy
return nil
}
filesystemOverhead, err := c.getFilesystemOverhead(pvc)
if err != nil {
log.Log.Reason(err).Errorf("Failed to get filesystem overhead for PVC %s/%s", namespace, pvcName)
return err
}
statusCopy.PersistentVolumeClaimInfo = &virtv1.PersistentVolumeClaimInfo{
ClaimName: pvc.Name,
AccessModes: pvc.Spec.AccessModes,
VolumeMode: pvc.Spec.VolumeMode,
Capacity: pvc.Status.Capacity,
Requests: pvc.Spec.Resources.Requests,
Preallocated: storagetypes.IsPreallocated(pvc.ObjectMeta.Annotations),
FilesystemOverhead: &filesystemOverhead,
}
}
*status = *statusCopy
return nil
}
// updateVolumeStatus updates the VMI's VolumeStatus based on pod and volume state.
func (c *Controller) updateVolumeStatus(vmi *virtv1.VirtualMachineInstance, virtlauncherPod *k8sv1.Pod) error {
oldStatus := vmi.Status.DeepCopy().VolumeStatus
oldStatusMap := make(map[string]virtv1.VolumeStatus)
for _, status := range oldStatus {
oldStatusMap[status.Name] = status
}
hotplugVolumes := storagetypes.GetHotplugVolumes(vmi, virtlauncherPod)
hotplugVolumesMap := make(map[string]*virtv1.Volume)
for _, volume := range hotplugVolumes {
hotplugVolumesMap[volume.Name] = volume
}
attachmentPods, err := controller.AttachmentPods(virtlauncherPod, c.podIndexer)
if err != nil {
return err
}
attachmentPod, _ := getActiveAndOldAttachmentPods(hotplugVolumes, attachmentPods)
newStatus := make([]virtv1.VolumeStatus, 0)
backendStoragePVC := backendstorage.PVCForVMI(c.pvcIndexer, vmi)
if backendStoragePVC != nil {
if backendStorage, ok := oldStatusMap[backendStoragePVC.Name]; ok {
newStatus = append(newStatus, backendStorage)
}
}
for _, volume := range vmi.Spec.Volumes {
status := virtv1.VolumeStatus{}
if existingStatus, ok := oldStatusMap[volume.Name]; ok {
status = existingStatus
} else {
status.Name = volume.Name
}
// Remove from map so we can detect existing volumes that have been removed from the spec.
delete(oldStatusMap, volume.Name)
if volume.MemoryDump != nil && status.MemoryDumpVolume == nil {
status.MemoryDumpVolume = &virtv1.DomainMemoryDumpInfo{
ClaimName: volume.Name,
}
}
pvcName := storagetypes.PVCNameFromVirtVolume(&volume)
if _, ok := hotplugVolumesMap[volume.Name]; ok {
c.processHotplugVolumeStatus(vmi, volume.Name, pvcName, &status, attachmentPod)
}
if volume.VolumeSource.PersistentVolumeClaim != nil || volume.VolumeSource.DataVolume != nil || volume.VolumeSource.MemoryDump != nil {
err = c.processPVCInfo(&status, pvcName, vmi.Namespace, false)
if err != nil {
return err
}
}
newStatus = append(newStatus, status)
}
for _, utilityVolume := range vmi.Spec.UtilityVolumes {
status := virtv1.VolumeStatus{}
if existingStatus, ok := oldStatusMap[utilityVolume.Name]; ok {
status = existingStatus
} else {
status.Name = utilityVolume.Name
}
// Remove from map so we can detect volumes removed from spec
delete(oldStatusMap, utilityVolume.Name)
c.processHotplugVolumeStatus(vmi, utilityVolume.Name, utilityVolume.ClaimName, &status, attachmentPod)
err = c.processPVCInfo(&status, utilityVolume.ClaimName, vmi.Namespace, true)
if err != nil {
return err
}
newStatus = append(newStatus, status)
}
// We have updated the status of the current volumes. If a volume was removed from the spec,
// we keep its status until it no longer has an associated pod, then remove it. Any statuses
// left in the map are statuses without a matching volume in the spec.
for volumeName, status := range oldStatusMap {
attachmentPod := findAttachmentPodByVolumeName(volumeName, attachmentPods)
if attachmentPod != nil {
status.HotplugVolume.AttachPodName = attachmentPod.Name
status.HotplugVolume.AttachPodUID = attachmentPod.UID
status.Phase = phaseForUnpluggedVolume(status.Phase)
log.Log.V(3).Infof("Setting phase %s for volume %s", status.Phase, volumeName)
if status.Phase == virtv1.HotplugVolumeDetaching && attachmentPod.DeletionTimestamp != nil {
status.Message = fmt.Sprintf("Deleted hotplug attachment pod %s, for volume %s", attachmentPod.Name, volumeName)
status.Reason = controller.SuccessfulDeletePodReason
c.recorder.Eventf(vmi, k8sv1.EventTypeNormal, status.Reason, status.Message)
}
// If the pod exists, we keep the status.
newStatus = append(newStatus, status)
} else {
log.Log.Object(vmi).V(3).Infof("Deleted status for volume %s", volumeName)
}
}
sort.SliceStable(newStatus, func(i, j int) bool {
return strings.Compare(newStatus[i].Name, newStatus[j].Name) == -1
})
vmi.Status.VolumeStatus = newStatus
return nil
}
func (c *Controller) checkEphemeralHotplugVolumes(vmi *virtv1.VirtualMachineInstance) {
if vmi == nil {
return
}
vm := c.getOwnerVM(vmi)
if vm == nil {
return
}
vmVolumeMap := map[string]struct{}{}
for _, volume := range vm.Spec.Template.Spec.Volumes {
vmVolumeMap[volume.Name] = struct{}{}
}
annotations := vmi.Annotations
if annotations == nil {
annotations = make(map[string]string)
}
var ephemeralVols []string
// check if the vmi has any volumes that are not in the vm spec
for _, volume := range vmi.Spec.Volumes {
if !storagetypes.IsHotplugVolume(&volume) {
continue
}
if _, exists := vmVolumeMap[volume.Name]; !exists {
ephemeralVols = append(ephemeralVols, volume.Name)
}
}
if len(ephemeralVols) == 0 {
// no ephemeral hotplugs were found, remove the annotation if it exists
delete(vmi.Annotations, virtv1.EphemeralHotplugAnnotation)
} else {
formattedVols, err := json.Marshal(ephemeralVols)
if err != nil {
log.Log.Reason(err).Error("could not serialize ephemeral volume list")
return
}
annotations[virtv1.EphemeralHotplugAnnotation] = string(formattedVols)
// will be patched at the end of updateStatus
vmi.Annotations = annotations
}
}
func phaseForUnpluggedVolume(phase virtv1.VolumePhase) virtv1.VolumePhase {
switch phase {
case virtv1.VolumeReady:
return virtv1.VolumeReady
case virtv1.HotplugVolumeMounted:
return virtv1.HotplugVolumeMounted
}
return virtv1.HotplugVolumeDetaching
}
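// Illustrative sketch (not called by the controller): for a volume removed
// from the spec, the phase is preserved while the guest may still see the
// volume, and everything else collapses to detaching:
//
//	phaseForUnpluggedVolume(virtv1.VolumeReady)          // VolumeReady
//	phaseForUnpluggedVolume(virtv1.HotplugVolumeMounted) // HotplugVolumeMounted
//	phaseForUnpluggedVolume(virtv1.VolumeBound)          // HotplugVolumeDetaching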
// volumeReady checks if a volume is in a ready state.
func (c *Controller) volumeReady(phase virtv1.VolumePhase) bool {
return phase == virtv1.VolumeReady
}
// getVolumePhaseMessageReason determines the phase, reason, and message for a volume.
func (c *Controller) getVolumePhaseMessageReason(claimName string, namespace string) (virtv1.VolumePhase, string, string) {
pvcInterface, pvcExists, _ := c.pvcIndexer.GetByKey(fmt.Sprintf("%s/%s", namespace, claimName))
if !pvcExists {
return virtv1.VolumePending, controller.FailedPvcNotFoundReason, fmt.Sprintf("PVC %s not found", claimName)
}
pvc := pvcInterface.(*k8sv1.PersistentVolumeClaim)
if pvc.Status.Phase == k8sv1.ClaimPending {
return virtv1.VolumePending, controller.PVCNotReadyReason, "PVC is in phase ClaimPending"
} else if pvc.Status.Phase == k8sv1.ClaimBound {
return virtv1.VolumeBound, controller.PVCNotReadyReason, "PVC is in phase Bound"
}
return virtv1.VolumePending, controller.PVCNotReadyReason, "PVC is in phase Lost"
}
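// Illustrative mapping of PVC state to the returned phase and reason
// (assumed inputs, shown only to document the function above):
//
//	PVC not in cache -> VolumePending, FailedPvcNotFoundReason, "PVC ... not found"
//	PVC ClaimPending -> VolumePending, PVCNotReadyReason, "PVC is in phase ClaimPending"
//	PVC ClaimBound   -> VolumeBound,   PVCNotReadyReason, "PVC is in phase Bound"
//	PVC ClaimLost    -> VolumePending, PVCNotReadyReason, "PVC is in phase Lost"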
// getFilesystemOverhead retrieves the filesystem overhead for a PVC.
func (c *Controller) getFilesystemOverhead(pvc *k8sv1.PersistentVolumeClaim) (virtv1.Percent, error) {
cdiInstances := len(c.cdiStore.List())
if cdiInstances != 1 {
if cdiInstances > 1 {
log.Log.V(3).Object(pvc).Reason(storagetypes.ErrMultipleCdiInstances).Infof(storagetypes.FSOverheadMsg)
} else {
log.Log.V(3).Object(pvc).Reason(storagetypes.ErrFailedToFindCdi).Infof(storagetypes.FSOverheadMsg)
}
return storagetypes.DefaultFSOverhead, nil
}
cdiConfigInterface, cdiConfigExists, err := c.cdiConfigStore.GetByKey(storagetypes.ConfigName)
if !cdiConfigExists || err != nil {
return "0", fmt.Errorf("Failed to find CDIConfig but CDI exists: %w", err)
}
cdiConfig, ok := cdiConfigInterface.(*cdiv1.CDIConfig)
if !ok {
return "0", fmt.Errorf("Failed to convert CDIConfig object %v to type CDIConfig", cdiConfigInterface)
}
return storagetypes.GetFilesystemOverhead(pvc.Spec.VolumeMode, pvc.Spec.StorageClassName, cdiConfig)
}
func (c *Controller) syncVolumesUpdate(vmi *virtv1.VirtualMachineInstance) {
vmiConditions := controller.NewVirtualMachineInstanceConditionManager()
condition := virtv1.VirtualMachineInstanceCondition{
Type: virtv1.VirtualMachineInstanceVolumesChange,
LastTransitionTime: v1.Now(),
Status: k8sv1.ConditionTrue,
Message: "migrate volumes",
}
vmiConditions.UpdateCondition(vmi, &condition)
}
func (c *Controller) requireVolumesUpdate(vmi *virtv1.VirtualMachineInstance) bool {
if len(vmi.Status.MigratedVolumes) < 1 {
return false
}
if controller.NewVirtualMachineInstanceConditionManager().HasCondition(vmi, virtv1.VirtualMachineInstanceVolumesChange) {
return false
}
migVolsMap := make(map[string]string)
for _, v := range vmi.Status.MigratedVolumes {
migVolsMap[v.SourcePVCInfo.ClaimName] = v.DestinationPVCInfo.ClaimName
}
for _, v := range vmi.Spec.Volumes {
claim := storagetypes.PVCNameFromVirtVolume(&v)
if claim == "" {
continue
}
if _, ok := migVolsMap[claim]; !ok {
return true
}
}
return false
}
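// Illustrative sketch for requireVolumesUpdate (hypothetical claim names):
// with vmi.Status.MigratedVolumes mapping source "src-pvc" to destination
// "dst-pvc", a spec volume still backed by "src-pvc" is a key in migVolsMap
// and requires no update, while a spec volume already switched to "dst-pvc"
// is not a key in the map, so the function reports that a volume update is
// required (unless the VolumesChange condition is already set).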
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package vmi
import (
"context"
"fmt"
"time"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/utils/trace"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/controller"
backendstorage "kubevirt.io/kubevirt/pkg/storage/backend-storage"
storagetypes "kubevirt.io/kubevirt/pkg/storage/types"
traceUtils "kubevirt.io/kubevirt/pkg/util/trace"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/pkg/virt-controller/watch/topology"
"kubevirt.io/kubevirt/pkg/virt-controller/watch/vsock"
)
const (
deleteNotifFailed = "Failed to process delete notification"
tombstoneGetObjectErrFmt = "couldn't get object from tombstone %+v"
)
func NewController(templateService templateService,
vmiInformer cache.SharedIndexInformer,
vmInformer cache.SharedIndexInformer,
podInformer cache.SharedIndexInformer,
pvcInformer cache.SharedIndexInformer,
migrationInformer cache.SharedIndexInformer,
storageClassInformer cache.SharedIndexInformer,
recorder record.EventRecorder,
clientset kubecli.KubevirtClient,
dataVolumeInformer cache.SharedIndexInformer,
storageProfileInformer cache.SharedIndexInformer,
cdiInformer cache.SharedIndexInformer,
cdiConfigInformer cache.SharedIndexInformer,
clusterConfig *virtconfig.ClusterConfig,
topologyHinter topology.Hinter,
netAnnotationsGenerator annotationsGenerator,
netStatusUpdater statusUpdater,
netSpecValidator specValidator,
netMigrationEvaluator migrationEvaluator,
additionalLauncherAnnotationsSync []string,
additionalLauncherLabelsSync []string,
) (*Controller, error) {
c := &Controller{
templateService: templateService,
Queue: workqueue.NewTypedRateLimitingQueueWithConfig[string](
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-controller-vmi"},
),
vmiIndexer: vmiInformer.GetIndexer(),
vmStore: vmInformer.GetStore(),
podIndexer: podInformer.GetIndexer(),
pvcIndexer: pvcInformer.GetIndexer(),
migrationIndexer: migrationInformer.GetIndexer(),
recorder: recorder,
clientset: clientset,
podExpectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
vmiExpectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
pvcExpectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
dataVolumeIndexer: dataVolumeInformer.GetIndexer(),
cdiStore: cdiInformer.GetStore(),
cdiConfigStore: cdiConfigInformer.GetStore(),
clusterConfig: clusterConfig,
topologyHinter: topologyHinter,
cidsMap: vsock.NewCIDsMap(),
backendStorage: backendstorage.NewBackendStorage(clientset, clusterConfig, storageClassInformer.GetStore(), storageProfileInformer.GetStore(), pvcInformer.GetIndexer()),
netAnnotationsGenerator: netAnnotationsGenerator,
updateNetworkStatus: netStatusUpdater,
validateNetworkSpec: netSpecValidator,
netMigrationEvaluator: netMigrationEvaluator,
additionalLauncherAnnotationsSync: additionalLauncherAnnotationsSync,
additionalLauncherLabelsSync: additionalLauncherLabelsSync,
}
c.hasSynced = func() bool {
return vmInformer.HasSynced() && vmiInformer.HasSynced() && podInformer.HasSynced() &&
dataVolumeInformer.HasSynced() && cdiConfigInformer.HasSynced() && cdiInformer.HasSynced() &&
pvcInformer.HasSynced() && storageClassInformer.HasSynced() && storageProfileInformer.HasSynced()
}
_, err := vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addVirtualMachineInstance,
DeleteFunc: c.deleteVirtualMachineInstance,
UpdateFunc: c.updateVirtualMachineInstance,
})
if err != nil {
return nil, err
}
_, err = podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addPod,
DeleteFunc: c.onPodDelete,
UpdateFunc: c.updatePod,
})
if err != nil {
return nil, err
}
_, err = dataVolumeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addDataVolume,
DeleteFunc: c.deleteDataVolume,
UpdateFunc: c.updateDataVolume,
})
if err != nil {
return nil, err
}
_, err = pvcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addPVC,
UpdateFunc: c.updatePVC,
})
if err != nil {
return nil, err
}
// NOTE: this is temporary to support ephemeral hotplug volume metrics and
// will be removed once the DeclarativeHotplugVolumes feature gate is enabled by default
_, err = vmInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: c.updateVM,
})
if err != nil {
return nil, err
}
return c, nil
}
type informalSyncError struct {
err error
reason string
}
func (i informalSyncError) Error() string {
return i.err.Error()
}
func (i informalSyncError) Reason() string {
return i.reason
}
func (i informalSyncError) RequiresRequeue() bool {
return false
}
type templateService interface {
RenderLaunchManifest(vmi *virtv1.VirtualMachineInstance) (*k8sv1.Pod, error)
RenderLaunchManifestNoVm(*virtv1.VirtualMachineInstance) (*k8sv1.Pod, error)
RenderHotplugAttachmentPodTemplate(volumes []*virtv1.Volume, ownerPod *k8sv1.Pod, vmi *virtv1.VirtualMachineInstance, claimMap map[string]*k8sv1.PersistentVolumeClaim) (*k8sv1.Pod, error)
RenderHotplugAttachmentTriggerPodTemplate(volume *virtv1.Volume, ownerPod *k8sv1.Pod, vmi *virtv1.VirtualMachineInstance, pvcName string, isBlock, tempPod bool) (*k8sv1.Pod, error)
GetLauncherImage() string
}
type annotationsGenerator interface {
GenerateFromActivePod(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod) map[string]string
}
type statusUpdater func(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod) error
type specValidator func(*k8sfield.Path, *virtv1.VirtualMachineInstanceSpec, *virtconfig.ClusterConfig) []v1.StatusCause
type migrationEvaluator interface {
// Evaluate determines if a VMI should request an automatic migration.
//
// The method returns one of three values:
// * ConditionUnknown: No action needed; the VMI should not be marked for auto-migration.
// * ConditionTrue: Mark the VMI for immediate migration.
// * ConditionFalse: Mark the VMI for pending migration.
Evaluate(vmi *virtv1.VirtualMachineInstance) k8sv1.ConditionStatus
}
type Controller struct {
templateService templateService
clientset kubecli.KubevirtClient
Queue workqueue.TypedRateLimitingInterface[string]
vmiIndexer cache.Indexer
vmStore cache.Store
podIndexer cache.Indexer
pvcIndexer cache.Indexer
migrationIndexer cache.Indexer
topologyHinter topology.Hinter
recorder record.EventRecorder
podExpectations *controller.UIDTrackingControllerExpectations
vmiExpectations *controller.UIDTrackingControllerExpectations
pvcExpectations *controller.UIDTrackingControllerExpectations
dataVolumeIndexer cache.Indexer
cdiStore cache.Store
cdiConfigStore cache.Store
clusterConfig *virtconfig.ClusterConfig
cidsMap vsock.Allocator
backendStorage *backendstorage.BackendStorage
hasSynced func() bool
netAnnotationsGenerator annotationsGenerator
updateNetworkStatus statusUpdater
validateNetworkSpec specValidator
netMigrationEvaluator migrationEvaluator
additionalLauncherAnnotationsSync []string
additionalLauncherLabelsSync []string
}
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) {
defer controller.HandlePanic()
defer c.Queue.ShutDown()
log.Log.Info("Starting vmi controller.")
// Wait for cache sync before we start the controller
cache.WaitForCacheSync(stopCh, c.hasSynced)
// Sync the CIDs from existing VMIs
var vmis []*virtv1.VirtualMachineInstance
for _, obj := range c.vmiIndexer.List() {
vmi := obj.(*virtv1.VirtualMachineInstance)
vmis = append(vmis, vmi)
}
c.cidsMap.Sync(vmis)
// Start the actual work
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
log.Log.Info("Stopping vmi controller.")
}
func (c *Controller) runWorker() {
for c.Execute() {
}
}
var virtControllerVMIWorkQueueTracer = &traceUtils.Tracer{Threshold: time.Second}
func (c *Controller) Execute() bool {
key, quit := c.Queue.Get()
if quit {
return false
}
virtControllerVMIWorkQueueTracer.StartTrace(key, "virt-controller VMI workqueue", trace.Field{Key: "Workqueue Key", Value: key})
defer virtControllerVMIWorkQueueTracer.StopTrace(key)
defer c.Queue.Done(key)
err := c.execute(key)
if err != nil {
log.Log.Reason(err).Infof("reenqueuing VirtualMachineInstance %v", key)
c.Queue.AddRateLimited(key)
} else {
log.Log.V(4).Infof("processed VirtualMachineInstance %v", key)
c.Queue.Forget(key)
}
return true
}
func (c *Controller) execute(key string) error {
// Fetch the latest VMI state from the cache
obj, exists, err := c.vmiIndexer.GetByKey(key)
if err != nil {
return err
}
// Once all finalizers are removed the vmi gets deleted and we can clean all expectations
if !exists {
c.podExpectations.DeleteExpectations(key)
c.vmiExpectations.DeleteExpectations(key)
c.cidsMap.Remove(key)
return nil
}
vmi := obj.(*virtv1.VirtualMachineInstance)
logger := log.Log.Object(vmi)
// This must be the first step in execution. Writing the object
// when the api version changes ensures our stored api version is updated.
if !controller.ObservedLatestApiVersionAnnotation(vmi) {
vmi := vmi.DeepCopy()
controller.SetLatestApiVersionAnnotation(vmi)
key := controller.VirtualMachineInstanceKey(vmi)
c.vmiExpectations.SetExpectations(key, 1, 0)
_, err = c.clientset.VirtualMachineInstance(vmi.ObjectMeta.Namespace).Update(context.Background(), vmi, v1.UpdateOptions{})
if err != nil {
c.vmiExpectations.SetExpectations(key, 0, 0)
return err
}
return nil
}
// If needsSync is true (expectations fulfilled), we can make safe assumptions about whether virt-handler or virt-controller owns the pod
needsSync := c.podExpectations.SatisfiedExpectations(key) && c.vmiExpectations.SatisfiedExpectations(key) && c.pvcExpectations.SatisfiedExpectations(key)
if !needsSync {
return nil
}
// Only consider pods which belong to this vmi
// excluding unfinalized migration targets from this list.
pod, err := controller.CurrentVMIPod(vmi, c.podIndexer)
if err != nil {
logger.Reason(err).Error("Failed to fetch pods for namespace from cache.")
return err
}
// Get all dataVolumes associated with this vmi
dataVolumes, err := storagetypes.ListDataVolumesFromVolumes(vmi.Namespace, vmi.Spec.Volumes, c.dataVolumeIndexer, c.pvcIndexer)
if err != nil {
logger.Reason(err).Error("Failed to fetch dataVolumes for namespace from cache.")
return err
}
syncErr, pod := c.sync(vmi, pod, dataVolumes)
err = c.updateStatus(vmi, pod, dataVolumes, syncErr)
if err != nil {
return err
}
if syncErr != nil && syncErr.RequiresRequeue() {
return syncErr
}
return nil
}
// When a pod is created, enqueue the vmi that manages it and update its podExpectations.
func (c *Controller) addPod(obj interface{}) {
pod := obj.(*k8sv1.Pod)
if pod.DeletionTimestamp != nil {
// on a restart of the controller manager, it's possible a new pod shows up in a state that
// is already pending deletion. Prevent the pod from being a creation observation.
c.onPodDelete(pod)
return
}
controllerRef := v1.GetControllerOf(pod)
vmi := c.resolveControllerRef(pod.Namespace, controllerRef)
if vmi == nil {
return
}
vmiKey, err := controller.KeyFunc(vmi)
if err != nil {
return
}
log.Log.V(4).Object(pod).Infof("Pod created")
c.podExpectations.CreationObserved(vmiKey)
c.enqueueVirtualMachine(vmi)
}
// When a pod is updated, figure out what vmi/s manage it and wake them
// up. If the labels of the pod have changed we need to awaken both the old
// and new vmi. old and cur must be *v1.Pod types.
func (c *Controller) updatePod(old, cur interface{}) {
curPod := cur.(*k8sv1.Pod)
oldPod := old.(*k8sv1.Pod)
if curPod.ResourceVersion == oldPod.ResourceVersion {
// Periodic resync will send update events for all known pods.
// Two different versions of the same pod will always have different RVs.
return
}
if curPod.DeletionTimestamp != nil {
labelChanged := !equality.Semantic.DeepEqual(curPod.Labels, oldPod.Labels)
// having a pod marked for deletion is enough to count as a deletion expectation
c.onPodDelete(curPod)
if labelChanged {
// we don't need to check the oldPod.DeletionTimestamp because DeletionTimestamp cannot be unset.
c.onPodDelete(oldPod)
}
return
}
curControllerRef := v1.GetControllerOf(curPod)
oldControllerRef := v1.GetControllerOf(oldPod)
controllerRefChanged := !equality.Semantic.DeepEqual(curControllerRef, oldControllerRef)
if controllerRefChanged {
// The ControllerRef was changed. Sync the old controller, if any.
if vmi := c.resolveControllerRef(oldPod.Namespace, oldControllerRef); vmi != nil {
c.enqueueVirtualMachine(vmi)
}
}
vmi := c.resolveControllerRef(curPod.Namespace, curControllerRef)
if vmi == nil {
return
}
log.Log.V(4).Object(curPod).Infof("Pod updated")
c.enqueueVirtualMachine(vmi)
}
// When a pod is deleted, enqueue the vmi that manages the pod and update its podExpectations.
// obj could be an *v1.Pod, or a DeletionFinalStateUnknown marker item.
func (c *Controller) onPodDelete(obj interface{}) {
pod, ok := obj.(*k8sv1.Pod)
// When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the pod
// changed labels the new vmi will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Log.Reason(fmt.Errorf(tombstoneGetObjectErrFmt, obj)).Error(deleteNotifFailed)
return
}
pod, ok = tombstone.Obj.(*k8sv1.Pod)
if !ok {
log.Log.Reason(fmt.Errorf("tombstone contained object that is not a pod %#v", obj)).Error(deleteNotifFailed)
return
}
}
controllerRef := v1.GetControllerOf(pod)
vmi := c.resolveControllerRef(pod.Namespace, controllerRef)
if vmi == nil {
return
}
vmiKey, err := controller.KeyFunc(vmi)
if err != nil {
return
}
c.podExpectations.DeletionObserved(vmiKey, controller.PodKey(pod))
c.enqueueVirtualMachine(vmi)
}
func (c *Controller) addVirtualMachineInstance(obj interface{}) {
c.lowerVMIExpectation(obj)
c.enqueueVirtualMachine(obj)
}
func (c *Controller) deleteVirtualMachineInstance(obj interface{}) {
vmi, ok := obj.(*virtv1.VirtualMachineInstance)
// When a delete is dropped, the relist will notice a vmi in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Log.Reason(fmt.Errorf(tombstoneGetObjectErrFmt, obj)).Error(deleteNotifFailed)
return
}
vmi, ok = tombstone.Obj.(*virtv1.VirtualMachineInstance)
if !ok {
log.Log.Reason(fmt.Errorf("tombstone contained object that is not a vmi %#v", obj)).Error(deleteNotifFailed)
return
}
}
c.lowerVMIExpectation(vmi)
c.enqueueVirtualMachine(vmi)
}
func (c *Controller) updateVirtualMachineInstance(_, curr interface{}) {
c.lowerVMIExpectation(curr)
c.enqueueVirtualMachine(curr)
}
func (c *Controller) lowerVMIExpectation(curr interface{}) {
key, err := controller.KeyFunc(curr)
if err != nil {
return
}
c.vmiExpectations.SetExpectations(key, 0, 0)
}
func (c *Controller) enqueueVirtualMachine(obj interface{}) {
logger := log.Log
vmi := obj.(*virtv1.VirtualMachineInstance)
key, err := controller.KeyFunc(vmi)
if err != nil {
logger.Object(vmi).Reason(err).Error("Failed to extract key from virtualmachine.")
return
}
c.Queue.Add(key)
}
// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (c *Controller) resolveControllerRef(namespace string, controllerRef *v1.OwnerReference) *virtv1.VirtualMachineInstance {
if controllerRef != nil && controllerRef.Kind == "Pod" {
// This could be an attachment pod, look up the pod, and check if it is owned by a VMI.
obj, exists, err := c.podIndexer.GetByKey(controller.NamespacedKey(namespace, controllerRef.Name))
if err != nil {
return nil
}
if !exists {
return nil
}
pod, _ := obj.(*k8sv1.Pod)
controllerRef = v1.GetControllerOf(pod)
}
// We can't look up by UID, so look up by Name and then verify UID.
// Don't even try to look up by Name if it is nil or the wrong Kind.
if controllerRef == nil || controllerRef.Kind != virtv1.VirtualMachineInstanceGroupVersionKind.Kind {
return nil
}
vmi, exists, err := c.vmiIndexer.GetByKey(controller.NamespacedKey(namespace, controllerRef.Name))
if err != nil {
return nil
}
if !exists {
return nil
}
if vmi.(*virtv1.VirtualMachineInstance).UID != controllerRef.UID {
// The controller we found with this Name is not the same one that the
// ControllerRef points to.
return nil
}
return vmi.(*virtv1.VirtualMachineInstance)
}
func (c *Controller) volumeStatusContainsVolumeAndPod(volumeStatus []virtv1.VolumeStatus, volume *virtv1.Volume) bool {
for _, status := range volumeStatus {
if status.Name == volume.Name && status.HotplugVolume != nil && status.HotplugVolume.AttachPodName != "" {
return true
}
}
return false
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package vmi
import (
"errors"
"fmt"
"sort"
"time"
k8sv1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
"kubevirt.io/kubevirt/pkg/controller"
"kubevirt.io/kubevirt/pkg/pointer"
storagetypes "kubevirt.io/kubevirt/pkg/storage/types"
"kubevirt.io/kubevirt/pkg/virt-controller/watch/common"
)
func needsHandleHotplug(hotplugVolumes []*v1.Volume, hotplugAttachmentPods []*k8sv1.Pod) bool {
if len(hotplugAttachmentPods) > 1 {
return true
}
// Determine if the ready volumes have changed compared to the current pod
if len(hotplugAttachmentPods) == 1 && podVolumesMatchesReadyVolumes(hotplugAttachmentPods[0], hotplugVolumes) {
return false
}
return len(hotplugVolumes) > 0 || len(hotplugAttachmentPods) > 0
}
func getActiveAndOldAttachmentPods(readyHotplugVolumes []*v1.Volume, hotplugAttachmentPods []*k8sv1.Pod) (*k8sv1.Pod, []*k8sv1.Pod) {
var currentPod *k8sv1.Pod
oldPods := make([]*k8sv1.Pod, 0)
for _, attachmentPod := range hotplugAttachmentPods {
if !podVolumesMatchesReadyVolumes(attachmentPod, readyHotplugVolumes) {
oldPods = append(oldPods, attachmentPod)
} else {
// don't consider the attachment pod if it is marked for deletion
if attachmentPod.DeletionTimestamp == nil {
currentPod = attachmentPod
}
}
}
sort.Slice(oldPods, func(i, j int) bool {
return oldPods[i].CreationTimestamp.Time.After(oldPods[j].CreationTimestamp.Time)
})
return currentPod, oldPods
}
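// Illustrative sketch (hypothetical pods podA, podB, podC): if only podC
// matches the ready volume set and has no deletion timestamp, then
//
//	current, old := getActiveAndOldAttachmentPods(readyVols, []*k8sv1.Pod{podA, podB, podC})
//	// current == podC; old == [podB, podA], sorted newest first by creation
//	// time (assuming podB was created after podA)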
// cleanupAttachmentPods deletes all old attachment pods when any of the following is true:
// 1. There is a currentPod that is running (not nil and phase == Running).
// 2. There are no ready volumes (numReadyVolumes == 0).
// 3. No old pod is running and unmarked for deletion.
// Otherwise, the newest running old pod is kept, since it is the latest
// pod that is closest to the desired state. Old pods that still have volumes
// waiting to be unmounted are kept as well.
func (c *Controller) cleanupAttachmentPods(currentPod *k8sv1.Pod, oldPods []*k8sv1.Pod, vmi *v1.VirtualMachineInstance, numReadyVolumes int) common.SyncError {
foundRunning := false
var statusMap = make(map[string]v1.VolumeStatus)
for _, vs := range vmi.Status.VolumeStatus {
if vs.HotplugVolume != nil {
statusMap[vs.Name] = vs
}
}
for _, vmiVolume := range vmi.Spec.Volumes {
if storagetypes.IsHotplugVolume(&vmiVolume) {
delete(statusMap, vmiVolume.Name)
}
}
currentPodIsNotRunning := currentPod == nil || currentPod.Status.Phase != k8sv1.PodRunning
for _, attachmentPod := range oldPods {
if !foundRunning &&
attachmentPod.Status.Phase == k8sv1.PodRunning && attachmentPod.DeletionTimestamp == nil &&
numReadyVolumes > 0 &&
currentPodIsNotRunning {
foundRunning = true
continue
}
volumesNotReadyForDelete := 0
for _, podVolume := range attachmentPod.Spec.Volumes {
volumeStatus, ok := statusMap[podVolume.Name]
if ok && !volumeReadyForPodDelete(volumeStatus.Phase) {
volumesNotReadyForDelete++
}
}
if volumesNotReadyForDelete > 0 {
log.Log.Object(vmi).V(3).Infof("Not deleting attachment pod %s, because there are still %d volumes to be unmounted", attachmentPod.Name, volumesNotReadyForDelete)
continue
}
if err := c.deleteAttachmentPod(vmi, attachmentPod); err != nil {
return common.NewSyncError(fmt.Errorf("Error deleting attachment pod %v", err), controller.FailedDeletePodReason)
}
log.Log.Object(vmi).V(3).Infof("Deleted attachment pod %s", attachmentPod.Name)
}
return nil
}
func volumeReadyForPodDelete(phase v1.VolumePhase) bool {
switch phase {
case v1.VolumeReady:
return false
case v1.HotplugVolumeMounted:
return false
}
return true
}
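// Illustrative truth table: a volume blocks attachment pod deletion while the
// guest may still be using it.
//
//	volumeReadyForPodDelete(v1.VolumeReady)            // false, still in use
//	volumeReadyForPodDelete(v1.HotplugVolumeMounted)   // false, still mounted
//	volumeReadyForPodDelete(v1.HotplugVolumeDetaching) // true, safe to delete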
func (c *Controller) isUtilityVolumeWithBlockPVC(vmi *v1.VirtualMachineInstance, volume *v1.Volume) (bool, error) {
isUtilityVolume := false
for _, utilityVolume := range vmi.Spec.UtilityVolumes {
if utilityVolume.Name == volume.Name {
isUtilityVolume = true
break
}
}
if !isUtilityVolume {
return false, nil
}
pvcInterface, pvcExists, _ := c.pvcIndexer.GetByKey(fmt.Sprintf("%s/%s", vmi.Namespace, volume.PersistentVolumeClaim.ClaimName))
if !pvcExists {
return false, fmt.Errorf("utility volume %s references PVC %s which does not exist", volume.Name, volume.PersistentVolumeClaim.ClaimName)
}
pvc := pvcInterface.(*k8sv1.PersistentVolumeClaim)
return storagetypes.IsPVCBlock(pvc.Spec.VolumeMode), nil
}
func (c *Controller) handleHotplugVolumes(hotplugVolumes []*v1.Volume, hotplugAttachmentPods []*k8sv1.Pod, vmi *v1.VirtualMachineInstance, virtLauncherPod *k8sv1.Pod, dataVolumes []*cdiv1.DataVolume) common.SyncError {
logger := log.Log.Object(vmi)
readyHotplugVolumes := make([]*v1.Volume, 0)
// Find all ready volumes
for _, volume := range hotplugVolumes {
isUtilityVolumeWithBlockPVC, err := c.isUtilityVolumeWithBlockPVC(vmi, volume)
if err != nil {
return common.NewSyncError(err, controller.PVCNotReadyReason)
}
if isUtilityVolumeWithBlockPVC {
logger.V(3).Infof("Skipping utility volume %s: configured with block volume mode PVC, utility volumes require filesystem volume mode", volume.Name)
continue
}
ready, wffc, err := storagetypes.VolumeReadyToAttachToNode(vmi.Namespace, *volume, dataVolumes, c.dataVolumeIndexer, c.pvcIndexer)
if err != nil {
return common.NewSyncError(fmt.Errorf("Error determining volume status %v", err), controller.PVCNotReadyReason)
}
if wffc {
// Volume is in WaitForFirstConsumer and has not been populated by CDI yet; create a dummy pod to trigger population.
logger.V(1).Infof("Volume %s/%s is in WaitForFirstConsumer, triggering population", vmi.Namespace, volume.Name)
syncError := c.triggerHotplugPopulation(volume, vmi, virtLauncherPod)
if syncError != nil {
return syncError
}
continue
}
if !ready {
// Volume not ready, skip until it is.
logger.V(3).Infof("Skipping hotplugged volume: %s, not ready", volume.Name)
continue
}
readyHotplugVolumes = append(readyHotplugVolumes, volume)
}
currentPod, oldPods := getActiveAndOldAttachmentPods(readyHotplugVolumes, hotplugAttachmentPods)
if currentPod == nil && !hasPendingPods(oldPods) && len(readyHotplugVolumes) > 0 {
if rateLimited, waitTime := c.requeueAfter(oldPods, time.Duration(len(readyHotplugVolumes)/-10)); rateLimited {
key, err := controller.KeyFunc(vmi)
if err != nil {
logger.Object(vmi).Reason(err).Error("failed to extract key from virtualmachine.")
return common.NewSyncError(fmt.Errorf("failed to extract key from virtualmachine. %v", err), controller.FailedHotplugSyncReason)
}
c.Queue.AddAfter(key, waitTime)
} else {
if newPod, err := c.createAttachmentPod(vmi, virtLauncherPod, readyHotplugVolumes); err != nil {
return err
} else {
currentPod = newPod
}
}
}
if err := c.cleanupAttachmentPods(currentPod, oldPods, vmi, len(readyHotplugVolumes)); err != nil {
return err
}
return nil
}
func (c *Controller) createAttachmentPod(vmi *v1.VirtualMachineInstance, virtLauncherPod *k8sv1.Pod, volumes []*v1.Volume) (*k8sv1.Pod, common.SyncError) {
attachmentPodTemplate, _ := c.createAttachmentPodTemplate(vmi, virtLauncherPod, volumes)
if attachmentPodTemplate == nil {
return nil, nil
}
vmiKey := controller.VirtualMachineInstanceKey(vmi)
pod, err := c.createPod(vmiKey, vmi.Namespace, attachmentPodTemplate)
if err != nil {
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, controller.FailedCreatePodReason, "Error creating attachment pod: %v", err)
return nil, common.NewSyncError(fmt.Errorf("Error creating attachment pod %v", err), controller.FailedCreatePodReason)
}
c.recorder.Eventf(vmi, k8sv1.EventTypeNormal, controller.SuccessfulCreatePodReason, "Created attachment pod %s", pod.Name)
return pod, nil
}
func (c *Controller) triggerHotplugPopulation(volume *v1.Volume, vmi *v1.VirtualMachineInstance, virtLauncherPod *k8sv1.Pod) common.SyncError {
populateHotplugPodTemplate, err := c.createAttachmentPopulateTriggerPodTemplate(volume, virtLauncherPod, vmi)
if err != nil {
return common.NewSyncError(fmt.Errorf("Error creating trigger pod template %v", err), controller.FailedCreatePodReason)
}
if populateHotplugPodTemplate != nil { // nil means the PVC is not populated yet.
vmiKey := controller.VirtualMachineInstanceKey(vmi)
_, err = c.createPod(vmiKey, vmi.Namespace, populateHotplugPodTemplate)
if err != nil {
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, controller.FailedCreatePodReason, "Error creating hotplug population trigger pod for volume %s: %v", volume.Name, err)
return common.NewSyncError(fmt.Errorf("Error creating hotplug population trigger pod %v", err), controller.FailedCreatePodReason)
}
c.recorder.Eventf(vmi, k8sv1.EventTypeNormal, controller.SuccessfulCreatePodReason, "Created hotplug trigger pod for volume %s", volume.Name)
}
return nil
}
func syncHotplugCondition(vmi *v1.VirtualMachineInstance, conditionType v1.VirtualMachineInstanceConditionType) {
vmiConditions := controller.NewVirtualMachineInstanceConditionManager()
condition := v1.VirtualMachineInstanceCondition{
Type: conditionType,
Status: k8sv1.ConditionTrue,
}
if !vmiConditions.HasCondition(vmi, condition.Type) {
vmiConditions.UpdateCondition(vmi, &condition)
log.Log.Object(vmi).V(4).Infof("adding hotplug condition %s", conditionType)
}
}
func canMoveToAttachedPhase(currentPhase v1.VolumePhase) bool {
return currentPhase == "" || currentPhase == v1.VolumeBound || currentPhase == v1.VolumePending
}
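// Illustrative examples: only volumes that have not yet reached an attached
// state may move forward to the attached phase.
//
//	canMoveToAttachedPhase("")                      // true, no phase recorded yet
//	canMoveToAttachedPhase(v1.VolumeBound)          // true
//	canMoveToAttachedPhase(v1.VolumePending)        // true
//	canMoveToAttachedPhase(v1.HotplugVolumeMounted) // false, already attached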
func findAttachmentPodByVolumeName(volumeName string, attachmentPods []*k8sv1.Pod) *k8sv1.Pod {
for _, pod := range attachmentPods {
for _, podVolume := range pod.Spec.Volumes {
if podVolume.Name == volumeName {
return pod
}
}
}
return nil
}
func (c *Controller) createAttachmentPodTemplate(vmi *v1.VirtualMachineInstance, virtlauncherPod *k8sv1.Pod, volumes []*v1.Volume) (*k8sv1.Pod, error) {
logger := log.Log.Object(vmi)
volumeNamesPVCMap, err := storagetypes.VirtVolumesToPVCMap(volumes, c.pvcIndexer, virtlauncherPod.Namespace)
if err != nil {
return nil, fmt.Errorf("failed to get PVC map: %v", err)
}
for volumeName, pvc := range volumeNamesPVCMap {
//Verify the PVC is ready to be used.
populated, err := cdiv1.IsSucceededOrPendingPopulation(pvc, func(name, namespace string) (*cdiv1.DataVolume, error) {
dv, exists, _ := c.dataVolumeIndexer.GetByKey(fmt.Sprintf("%s/%s", namespace, name))
if !exists {
return nil, fmt.Errorf("unable to find datavolume %s/%s", namespace, name)
}
return dv.(*cdiv1.DataVolume), nil
})
if err != nil {
return nil, err
}
if !populated {
logger.Infof("Unable to hotplug, claim %s found, but not ready", pvc.Name)
delete(volumeNamesPVCMap, volumeName)
}
}
if len(volumeNamesPVCMap) > 0 {
return c.templateService.RenderHotplugAttachmentPodTemplate(volumes, virtlauncherPod, vmi, volumeNamesPVCMap)
}
return nil, err
}
func (c *Controller) createAttachmentPopulateTriggerPodTemplate(volume *v1.Volume, virtlauncherPod *k8sv1.Pod, vmi *v1.VirtualMachineInstance) (*k8sv1.Pod, error) {
claimName := storagetypes.PVCNameFromVirtVolume(volume)
if claimName == "" {
return nil, errors.New("Unable to hotplug, claim is not a PVC or DataVolume")
}
pvc, exists, isBlock, err := storagetypes.IsPVCBlockFromStore(c.pvcIndexer, virtlauncherPod.Namespace, claimName)
if err != nil {
return nil, err
}
if !exists {
return nil, fmt.Errorf("Unable to trigger hotplug population, claim %s not found", claimName)
}
pod, err := c.templateService.RenderHotplugAttachmentTriggerPodTemplate(volume, virtlauncherPod, vmi, pvc.Name, isBlock, true)
return pod, err
}
func (c *Controller) deleteAllAttachmentPods(vmi *v1.VirtualMachineInstance) error {
virtlauncherPod, err := controller.CurrentVMIPod(vmi, c.podIndexer)
if err != nil {
return err
}
if virtlauncherPod != nil {
attachmentPods, err := controller.AttachmentPods(virtlauncherPod, c.podIndexer)
if err != nil {
return err
}
for _, attachmentPod := range attachmentPods {
err := c.deleteAttachmentPod(vmi, attachmentPod)
if err != nil && !k8serrors.IsNotFound(err) {
return err
}
}
}
return nil
}
func (c *Controller) deleteOrphanedAttachmentPods(vmi *v1.VirtualMachineInstance) error {
pods, err := c.listPodsFromNamespace(vmi.Namespace)
if err != nil {
return fmt.Errorf("failed to list pods from namespace %s: %v", vmi.Namespace, err)
}
for _, pod := range pods {
if !metav1.IsControlledBy(pod, vmi) {
continue
}
if !controller.PodIsDown(pod) {
continue
}
attachmentPods, err := controller.AttachmentPods(pod, c.podIndexer)
if err != nil {
log.Log.Reason(err).Errorf("failed to get attachment pods %s: %v", controller.PodKey(pod), err)
// do not return; continue the cleanup...
continue
}
for _, attachmentPod := range attachmentPods {
if err := c.deleteAttachmentPod(vmi, attachmentPod); err != nil {
log.Log.Reason(err).Errorf("failed to delete attachment pod %s: %v", controller.PodKey(attachmentPod), err)
// do not return; continue the cleanup...
}
}
}
return nil
}
func (c *Controller) deleteAttachmentPod(vmi *v1.VirtualMachineInstance, attachmentPod *k8sv1.Pod) error {
if attachmentPod.DeletionTimestamp != nil {
return nil
}
vmiKey := controller.VirtualMachineInstanceKey(vmi)
err := c.deletePod(vmiKey, attachmentPod, metav1.DeleteOptions{
GracePeriodSeconds: pointer.P(int64(0)),
})
if err != nil {
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, controller.FailedDeletePodReason, "Failed to delete attachment pod %s", attachmentPod.Name)
return err
}
c.recorder.Eventf(vmi, k8sv1.EventTypeNormal, controller.SuccessfulDeletePodReason, "Deleted attachment pod %s", attachmentPod.Name)
return nil
}
func podVolumesMatchesReadyVolumes(attachmentPod *k8sv1.Pod, volumes []*v1.Volume) bool {
// -2 for the built-in emptyDir and service account token volumes every attachment pod carries
if len(attachmentPod.Spec.Volumes)-2 != len(volumes) {
return false
}
podVolumeMap := make(map[string]k8sv1.Volume)
for _, volume := range attachmentPod.Spec.Volumes {
if volume.PersistentVolumeClaim != nil {
podVolumeMap[volume.Name] = volume
}
}
for _, volume := range volumes {
delete(podVolumeMap, volume.Name)
}
return len(podVolumeMap) == 0
}
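// Illustrative sketch for podVolumesMatchesReadyVolumes: an attachment pod
// carrying hotplug PVCs "a" and "b" plus its two built-in volumes (emptyDir
// and token) has four pod volumes, so with a ready set of exactly {a, b} the
// length check holds (4 - 2 == 2) and every PVC-backed pod volume is consumed
// by the ready set, yielding a match; an extra or missing PVC breaks it.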
func hasPendingPods(pods []*k8sv1.Pod) bool {
for _, pod := range pods {
if pod.Status.Phase == k8sv1.PodRunning || pod.Status.Phase == k8sv1.PodSucceeded || pod.Status.Phase == k8sv1.PodFailed {
continue
}
return true
}
return false
}
func (c *Controller) requeueAfter(oldPods []*k8sv1.Pod, threshold time.Duration) (bool, time.Duration) {
if len(oldPods) > 0 && oldPods[0].CreationTimestamp.Time.After(time.Now().Add(-1*threshold)) {
return true, threshold - time.Since(oldPods[0].CreationTimestamp.Time)
}
return false, 0
}
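// Illustrative sketch for requeueAfter (hypothetical timing): with a 30s
// threshold and the newest old pod created 10s ago,
//
//	limited, wait := c.requeueAfter(oldPods, 30*time.Second)
//	// limited == true, wait == ~20s; once the pod is older than the
//	// threshold the function returns false, 0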
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package volumemigration
import (
"context"
"fmt"
"strings"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
k8sv1 "k8s.io/api/core/v1"
v1 "kubevirt.io/api/core/v1"
virtv1 "kubevirt.io/api/core/v1"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/controller"
backendstorage "kubevirt.io/kubevirt/pkg/storage/backend-storage"
storagetypes "kubevirt.io/kubevirt/pkg/storage/types"
)
const InvalidUpdateErrMsg = "The volume can only be reverted to the previous version during the update"
// invalidVols includes the invalid volumes for the volume migration
type invalidVols struct {
hotplugged []string
fs []string
shareable []string
luns []string
noCSIDVs []string
}
func (vols *invalidVols) errorMessage() error {
var s strings.Builder
if len(vols.hotplugged) < 1 && len(vols.fs) < 1 &&
len(vols.shareable) < 1 && len(vols.luns) < 1 && len(vols.noCSIDVs) < 1 {
return nil
}
s.WriteString("invalid volumes to update with migration:")
if len(vols.hotplugged) > 0 {
s.WriteString(fmt.Sprintf(" hotplugged: %v", vols.hotplugged))
}
if len(vols.fs) > 0 {
s.WriteString(fmt.Sprintf(" filesystems: %v", vols.fs))
}
if len(vols.shareable) > 0 {
s.WriteString(fmt.Sprintf(" shareable: %v", vols.shareable))
}
if len(vols.luns) > 0 {
s.WriteString(fmt.Sprintf(" luns: %v", vols.luns))
}
if len(vols.noCSIDVs) > 0 {
s.WriteString(fmt.Sprintf(" DV storage class isn't a CSI or not using volume populators: %v", vols.noCSIDVs))
}
return fmt.Errorf("%s", s.String())
}
// updatedVolumesMapping returns a mapping from volume name to the claim currently set in the VM template, for volumes whose claim differs from the one in the running VMI spec
func updatedVolumesMapping(vmi *virtv1.VirtualMachineInstance, vm *virtv1.VirtualMachine) map[string]string {
updateVols := make(map[string]string)
vmVols := make(map[string]string)
// New volumes
for _, v := range vm.Spec.Template.Spec.Volumes {
if name := storagetypes.PVCNameFromVirtVolume(&v); name != "" {
vmVols[v.Name] = name
}
}
// Old volumes
for _, v := range vmi.Spec.Volumes {
name := storagetypes.PVCNameFromVirtVolume(&v)
if name == "" {
continue
}
if claim, ok := vmVols[v.Name]; ok && name != claim {
updateVols[v.Name] = claim
}
}
return updateVols
}
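// Illustrative sketch (hypothetical names): if the VM template now backs
// volume "rootdisk" with claim "dst-pvc" while the running VMI still uses
// "src-pvc" for it, updatedVolumesMapping returns map["rootdisk"] == "dst-pvc";
// volumes whose claims match in both specs do not appear in the map.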
// PersistentVolumesUpdated checks volumes that exist in both the VM and the VMI for claim changes
func PersistentVolumesUpdated(vmSpec, vmiSpec *virtv1.VirtualMachineInstanceSpec) bool {
vmiVolumesByName := storagetypes.GetVolumesByName(vmiSpec)
for _, vmVol := range vmSpec.Volumes {
vmClaimName := storagetypes.PVCNameFromVirtVolume(&vmVol)
vmiVol, exists := vmiVolumesByName[vmVol.Name]
if vmClaimName == "" || !exists {
continue
}
vmiClaimName := storagetypes.PVCNameFromVirtVolume(vmiVol)
if vmiClaimName == "" {
continue
}
if vmiClaimName != vmClaimName {
return true
}
}
return false
}
// ValidateVolumes checks that the volumes can be updated with the migration
func ValidateVolumes(vmi *virtv1.VirtualMachineInstance, vm *virtv1.VirtualMachine, dvStore, pvcStore cache.Store) error {
var invalidVols invalidVols
if vmi == nil {
return fmt.Errorf("cannot validate the migrated volumes for an empty VMI")
}
if vm == nil {
return fmt.Errorf("cannot validate the migrated volumes for an empty VM")
}
updatedVols := updatedVolumesMapping(vmi, vm)
valid := true
disks := storagetypes.GetDisksByName(&vmi.Spec)
filesystems := storagetypes.GetFilesystemsFromVolumes(vmi)
for _, v := range vm.Spec.Template.Spec.Volumes {
_, ok := updatedVols[v.Name]
if !ok {
continue
}
// Filesystems
if _, ok := filesystems[v.Name]; ok {
invalidVols.fs = append(invalidVols.fs, v.Name)
valid = false
continue
}
d, ok := disks[v.Name]
if !ok {
continue
}
// Shareable disks
if d.Shareable != nil && *d.Shareable {
invalidVols.shareable = append(invalidVols.shareable, v.Name)
valid = false
continue
}
// LUN disks
if d.DiskDevice.LUN != nil {
invalidVols.luns = append(invalidVols.luns, v.Name)
valid = false
continue
}
// DataVolumes with a no-csi storage class
if v.VolumeSource.DataVolume != nil {
dv, err := storagetypes.GetDataVolumeFromCache(vm.Namespace, v.VolumeSource.DataVolume.Name, dvStore)
if err != nil {
return err
}
if dv == nil {
return storagetypes.NewDVNotFoundError(v.VolumeSource.DataVolume.Name)
}
// if the dv is in succeeded state then it is safe to use since it has already been populated.
if dv.Status.Phase == cdiv1.Succeeded {
continue
}
pvc, err := storagetypes.GetPersistentVolumeClaimFromCache(vm.Namespace, dv.Name, pvcStore)
if err != nil {
return err
}
if pvc == nil {
return storagetypes.NewPVCNotFoundError(v.VolumeSource.DataVolume.Name)
}
// The dataSourceRef is set if the volume populators are supported
if pvc.Spec.DataSourceRef == nil {
invalidVols.noCSIDVs = append(invalidVols.noCSIDVs, v.Name)
valid = false
}
}
}
if !valid {
return invalidVols.errorMessage()
}
return nil
}
// VolumeMigrationCancel cancels the volume migration
func VolumeMigrationCancel(clientset kubecli.KubevirtClient, vmi *virtv1.VirtualMachineInstance, vm *virtv1.VirtualMachine) (bool, error) {
if !IsVolumeMigrating(vmi) || !changeMigratedVolumes(vmi, vm) {
return false, nil
}
// A volume migration can be canceled only if the original set of volumes is restored
if revertedToOldVolumes(vmi, vm) {
vmiCopy, err := PatchVMIVolumes(clientset, vmi, vm)
if err != nil {
return true, err
}
return true, cancelVolumeMigration(clientset, vmiCopy)
}
return true, fmt.Errorf(InvalidUpdateErrMsg)
}
func changeMigratedVolumes(vmi *virtv1.VirtualMachineInstance, vm *virtv1.VirtualMachine) bool {
updatedVols := updatedVolumesMapping(vmi, vm)
for _, migVol := range vmi.Status.MigratedVolumes {
if _, ok := updatedVols[migVol.VolumeName]; ok {
return true
}
}
return false
}
// revertedToOldVolumes checks that all migrated volumes have been reverted from destination to the source volume
func revertedToOldVolumes(vmi *virtv1.VirtualMachineInstance, vm *virtv1.VirtualMachine) bool {
updatedVols := updatedVolumesMapping(vmi, vm)
for _, migVol := range vmi.Status.MigratedVolumes {
if migVol.SourcePVCInfo == nil {
// something wrong with the source volume
return false
}
claim, ok := updatedVols[migVol.VolumeName]
if !ok || migVol.SourcePVCInfo.ClaimName != claim {
return false
}
delete(updatedVols, migVol.VolumeName)
}
// updatedVols should only include the source volumes and not additional volumes.
return len(updatedVols) == 0
}
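// Illustrative sketch for revertedToOldVolumes (hypothetical names): a
// migration of "rootdisk" from "src-pvc" to "dst-pvc" counts as reverted only
// if the VM template points "rootdisk" back at "src-pvc" and no further claim
// changes exist; any unrelated claim change leaves an entry in updatedVols and
// the function returns false.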
func cancelVolumeMigration(clientset kubecli.KubevirtClient, vmi *virtv1.VirtualMachineInstance) error {
if vmi == nil {
return fmt.Errorf("vmi is empty")
}
log.Log.V(2).Object(vmi).Infof("Cancel volume migration")
vmiConditions := controller.NewVirtualMachineInstanceConditionManager()
vmiCopy := vmi.DeepCopy()
vmiConditions.UpdateCondition(vmiCopy, &virtv1.VirtualMachineInstanceCondition{
Type: virtv1.VirtualMachineInstanceVolumesChange,
LastTransitionTime: metav1.Now(),
Status: k8sv1.ConditionFalse,
Reason: virtv1.VirtualMachineInstanceReasonVolumesChangeCancellation,
})
vmiCopy.Status.MigratedVolumes = nil
if equality.Semantic.DeepEqual(vmiCopy.Status, vmi.Status) {
return nil
}
log.Log.V(2).Object(vmi).Infof("Patch VMI %s status to cancel the volume migration", vmi.Name)
p, err := patch.New(
patch.WithTest("/status/conditions", vmi.Status.Conditions),
patch.WithReplace("/status/conditions", vmiCopy.Status.Conditions),
patch.WithTest("/status/migratedVolumes", vmi.Status.MigratedVolumes),
patch.WithReplace("/status/migratedVolumes", vmiCopy.Status.MigratedVolumes),
).GeneratePayload()
if err != nil {
return err
}
_, err = clientset.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType, p, metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("failed updating vmi condition: %v", err)
}
return nil
}
// IsVolumeMigrating checks the VMI condition for volume migration
func IsVolumeMigrating(vmi *virtv1.VirtualMachineInstance) bool {
return controller.NewVirtualMachineInstanceConditionManager().HasConditionWithStatus(vmi,
virtv1.VirtualMachineInstanceVolumesChange, k8sv1.ConditionTrue)
}
func GenerateMigratedVolumes(pvcStore cache.Store, vmi *virtv1.VirtualMachineInstance, vm *virtv1.VirtualMachine) ([]virtv1.StorageMigratedVolumeInfo, error) {
var migVolsInfo []virtv1.StorageMigratedVolumeInfo
oldVols := make(map[string]string)
for _, v := range vmi.Spec.Volumes {
if pvcName := storagetypes.PVCNameFromVirtVolume(&v); pvcName != "" {
oldVols[v.Name] = pvcName
}
}
for _, v := range vm.Spec.Template.Spec.Volumes {
claim := storagetypes.PVCNameFromVirtVolume(&v)
if claim == "" {
continue
}
oldClaim, ok := oldVols[v.Name]
if !ok {
continue
}
if oldClaim == claim {
continue
}
oldPvc, err := storagetypes.GetPersistentVolumeClaimFromCache(vmi.Namespace, oldClaim, pvcStore)
if err != nil {
return nil, err
}
pvc, err := storagetypes.GetPersistentVolumeClaimFromCache(vmi.Namespace, claim, pvcStore)
if err != nil {
return nil, err
}
var oldVolMode *k8sv1.PersistentVolumeMode
var volMode *k8sv1.PersistentVolumeMode
if oldPvc != nil && oldPvc.Spec.VolumeMode != nil {
oldVolMode = oldPvc.Spec.VolumeMode
}
if pvc != nil && pvc.Spec.VolumeMode != nil {
volMode = pvc.Spec.VolumeMode
}
migVolsInfo = append(migVolsInfo, virtv1.StorageMigratedVolumeInfo{
VolumeName: v.Name,
DestinationPVCInfo: &virtv1.PersistentVolumeClaimInfo{
ClaimName: claim,
VolumeMode: volMode,
},
SourcePVCInfo: &virtv1.PersistentVolumeClaimInfo{
ClaimName: oldClaim,
VolumeMode: oldVolMode,
},
})
}
return migVolsInfo, nil
}
// PatchVMIStatusWithMigratedVolumes patches the VMI status with the source and destination volume information during the volume migration
func PatchVMIStatusWithMigratedVolumes(clientset kubecli.KubevirtClient, migVolsInfo []v1.StorageMigratedVolumeInfo, vmi *virtv1.VirtualMachineInstance) error {
if len(vmi.Status.MigratedVolumes) > 0 {
return nil
}
if equality.Semantic.DeepEqual(migVolsInfo, vmi.Status.MigratedVolumes) {
return nil
}
patch, err := patch.New(
patch.WithTest("/status/migratedVolumes", vmi.Status.MigratedVolumes),
patch.WithReplace("/status/migratedVolumes", migVolsInfo),
).GeneratePayload()
if err != nil {
return err
}
vmi, err = clientset.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
return err
}
// PatchVMIVolumes replaces the VMI volumes with the migrated volumes
func PatchVMIVolumes(clientset kubecli.KubevirtClient, vmi *virtv1.VirtualMachineInstance, vm *virtv1.VirtualMachine) (*virtv1.VirtualMachineInstance, error) {
if vmi == nil || vm == nil {
return nil, fmt.Errorf("cannot patch the volumes for an empty VMI or VM")
}
log.Log.V(2).Object(vmi).Infof("Patch VMI volumes")
migVols := make(map[string]bool)
vmiCopy := vmi.DeepCopy()
if len(vmi.Status.MigratedVolumes) == 0 {
return vmiCopy, nil
}
vmVols := storagetypes.GetVolumesByName(&vm.Spec.Template.Spec)
for _, migVol := range vmi.Status.MigratedVolumes {
migVols[migVol.VolumeName] = true
}
for i, v := range vmi.Spec.Volumes {
if _, ok := migVols[v.Name]; ok {
if vol, ok := vmVols[v.Name]; ok {
vmiCopy.Spec.Volumes[i] = *vol
}
}
}
if equality.Semantic.DeepEqual(vmi.Spec.Volumes, vmiCopy.Spec.Volumes) {
return vmiCopy, nil
}
patch, err := patch.New(
patch.WithTest("/spec/volumes", vmi.Spec.Volumes),
patch.WithReplace("/spec/volumes", vmiCopy.Spec.Volumes),
).GeneratePayload()
if err != nil {
return nil, err
}
return clientset.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
}
// ValidateVolumesUpdateMigration checks if the VMI can be updated with the volume migration. For example, for certain VMs the migration is not allowed for reasons other than storage
func ValidateVolumesUpdateMigration(vmi *virtv1.VirtualMachineInstance, vm *virtv1.VirtualMachine, migVolsInfo []virtv1.StorageMigratedVolumeInfo) error {
if vmi == nil {
return fmt.Errorf("VMI is empty")
}
if len(migVolsInfo) == 0 {
return nil
}
// Check if there are reasons other than DisksNotLiveMigratable
for _, cond := range vmi.Status.Conditions {
if cond.Type == virtv1.VirtualMachineInstanceIsStorageLiveMigratable &&
cond.Status == k8sv1.ConditionFalse {
return fmt.Errorf("cannot migrate the volumes as the VMI isn't migratable: %s", cond.Message)
}
}
// Check that all RWO volumes will be copied
volMigMap := make(map[string]bool)
for _, v := range migVolsInfo {
volMigMap[v.VolumeName] = true
}
persistBackendVolName := backendstorage.CurrentPVCName(vmi)
for _, v := range vmi.Status.VolumeStatus {
if v.PersistentVolumeClaimInfo == nil {
continue
}
// Skip the check for the persistent VM state; it is handled differently than the other PVCs
if v.Name == persistBackendVolName {
continue
}
_, ok := volMigMap[v.Name]
if storagetypes.IsReadWriteOnceAccessMode(v.PersistentVolumeClaimInfo.AccessModes) && !ok {
return fmt.Errorf("cannot migrate the VM. The volume %s is RWO and not included in the migration volumes", v.Name)
}
}
return nil
}
func patchConditions(clientset kubecli.KubevirtClient, vmi, vmiCopy *virtv1.VirtualMachineInstance) error {
if equality.Semantic.DeepEqual(vmi.Status.Conditions, vmiCopy.Status.Conditions) {
return nil
}
p, err := patch.New(
patch.WithTest("/status/conditions", vmi.Status.Conditions),
patch.WithReplace("/status/conditions", vmiCopy.Status.Conditions),
).GeneratePayload()
if err != nil {
return err
}
_, err = clientset.VirtualMachineInstance(vmi.Namespace).Patch(context.Background(), vmi.Name, types.JSONPatchType,
p, metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("failed updating vmi condition: %v", err)
}
return nil
}
func SetVolumesChangeCondition(clientset kubecli.KubevirtClient, vmi *virtv1.VirtualMachineInstance,
status k8sv1.ConditionStatus, msg string) error {
if vmi == nil {
return fmt.Errorf("vmi is empty")
}
vmiCopy := vmi.DeepCopy()
controller.NewVirtualMachineInstanceConditionManager().UpdateCondition(vmiCopy, &virtv1.VirtualMachineInstanceCondition{
Type: virtv1.VirtualMachineInstanceVolumesChange,
LastTransitionTime: metav1.Now(),
Status: status,
Message: msg,
})
return patchConditions(clientset, vmi, vmiCopy)
}
func UnsetVolumeChangeCondition(clientset kubecli.KubevirtClient, vmi *virtv1.VirtualMachineInstance) error {
if vmi == nil {
return fmt.Errorf("vmi is empty")
}
vmiCopy := vmi.DeepCopy()
controller.NewVirtualMachineInstanceConditionManager().RemoveCondition(vmiCopy,
virtv1.VirtualMachineInstanceVolumesChange)
return patchConditions(clientset, vmi, vmiCopy)
}
package vsock
import (
"fmt"
"math"
"math/rand"
"sync"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/controller"
)
type Allocator interface {
Sync(vmis []*virtv1.VirtualMachineInstance)
Allocate(vmi *virtv1.VirtualMachineInstance) error
Remove(key string)
}
type randCIDFunc func() uint32
type nextCIDFunc func(uint32) uint32
type cidsMap struct {
mu sync.Mutex
cids map[string]uint32
reverse map[uint32]string
randCID randCIDFunc
nextCID nextCIDFunc
}
func NewCIDsMap() *cidsMap {
return &cidsMap{
cids: make(map[string]uint32),
reverse: make(map[uint32]string),
randCID: func() uint32 {
cid := rand.Uint32()
if cid < 3 {
// The guest CID will start from 3
cid += 3
}
return cid
},
nextCID: func(cur uint32) uint32 {
if cur == math.MaxUint32 {
return 3
}
return cur + 1
},
}
}
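// Illustrative sketch of the CID sequence: guest CIDs below 3 are reserved by
// the vsock spec, so the random starting point is shifted to >= 3 and the
// linear probe wraps from MaxUint32 back to 3:
//
//	m := NewCIDsMap()
//	m.nextCID(7)              // 8
//	m.nextCID(math.MaxUint32) // 3, skipping the reserved CIDs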
// Sync loads the allocated CIDs from VMIs.
func (m *cidsMap) Sync(vmis []*virtv1.VirtualMachineInstance) {
m.mu.Lock()
defer m.mu.Unlock()
for _, vmi := range vmis {
if vmi.Status.VSOCKCID == nil {
continue
}
key := controller.VirtualMachineInstanceKey(vmi)
m.cids[key] = *vmi.Status.VSOCKCID
m.reverse[*vmi.Status.VSOCKCID] = key
}
}
// Allocate selects a new CID and sets it on the status of the given VMI.
func (m *cidsMap) Allocate(vmi *virtv1.VirtualMachineInstance) error {
m.mu.Lock()
defer m.mu.Unlock()
key := controller.VirtualMachineInstanceKey(vmi)
if cid, exist := m.cids[key]; exist {
vmi.Status.VSOCKCID = &cid
return nil
}
start := m.randCID()
assigned := start
for {
if _, exist := m.reverse[assigned]; !exist {
break
}
assigned = m.nextCID(assigned)
if assigned == start {
// We ran out of CIDs. In practice this shouldn't happen.
return fmt.Errorf("CIDs exhausted")
}
}
m.cids[key] = assigned
m.reverse[assigned] = key
vmi.Status.VSOCKCID = &assigned
return nil
}
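// Illustrative usage of Allocate (hypothetical vmi): allocation is idempotent
// per VMI key, so repeated calls re-set the same CID until the entry is removed:
//
//	_ = m.Allocate(vmi) // picks a free CID and writes it to vmi.Status.VSOCKCID
//	_ = m.Allocate(vmi) // same CID again, no new allocation
//	m.Remove(controller.VirtualMachineInstanceKey(vmi))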
// Remove cleans up the CID for the given VMI.
func (m *cidsMap) Remove(key string) {
m.mu.Lock()
defer m.mu.Unlock()
if cid, exist := m.cids[key]; exist {
delete(m.reverse, cid)
delete(m.cids, key)
}
}
package workloadupdater
import (
"context"
"fmt"
"math"
"math/rand"
"sync"
"time"
"golang.org/x/time/rate"
k8sv1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
k8svalidation "k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
v1 "kubevirt.io/api/core/v1"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/controller"
metrics "kubevirt.io/kubevirt/pkg/monitoring/metrics/virt-controller"
migrationutils "kubevirt.io/kubevirt/pkg/util/migrations"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
volumemig "kubevirt.io/kubevirt/pkg/virt-controller/watch/volume-migration"
)
const (
// FailedCreateVirtualMachineInstanceMigrationReason is added in an event if creating a VirtualMachineInstanceMigration failed.
FailedCreateVirtualMachineInstanceMigrationReason = "FailedCreate"
// SuccessfulCreateVirtualMachineInstanceMigrationReason is added in an event if creating a VirtualMachineInstanceMigration succeeded.
SuccessfulCreateVirtualMachineInstanceMigrationReason = "SuccessfulCreate"
// FailedEvictVirtualMachineInstanceReason is added in an event if a deletion of a VMI fails
FailedEvictVirtualMachineInstanceReason = "FailedEvict"
// SuccessfulEvictVirtualMachineInstanceReason is added in an event if a deletion of a VMI Succeeds
SuccessfulEvictVirtualMachineInstanceReason = "SuccessfulEvict"
// SuccessfulChangeAbortionReason is added in an event if a deletion of a
// migration succeeds
SuccessfulChangeAbortionReason = "SuccessfulChangeAbortion"
// FailedChangeAbortionReason is added in an event if a deletion of a
// migration fails
FailedChangeAbortionReason = "FailedChangeAbortion"
)
// time to wait before re-enqueuing when outdated VMIs are still detected
const periodicReEnqueueIntervalSeconds = 30
// ensures we don't execute more than once every 5 seconds
const defaultThrottleInterval = 5 * time.Second
const defaultBatchDeletionIntervalSeconds = 60
const defaultBatchDeletionCount = 10
type WorkloadUpdateController struct {
clientset kubecli.KubevirtClient
queue workqueue.TypedRateLimitingInterface[string]
vmiStore cache.Store
podIndexer cache.Indexer
migrationIndexer cache.Indexer
recorder record.EventRecorder
migrationExpectations *controller.UIDTrackingControllerExpectations
kubeVirtStore cache.Store
clusterConfig *virtconfig.ClusterConfig
launcherImage string
lastDeletionBatch time.Time
hasSynced func() bool
}
type updateData struct {
allOutdatedVMIs []*virtv1.VirtualMachineInstance
migratableOutdatedVMIs []*virtv1.VirtualMachineInstance
evictOutdatedVMIs []*virtv1.VirtualMachineInstance
abortChangeVMIs []*virtv1.VirtualMachineInstance
numActiveMigrations int
}
func NewWorkloadUpdateController(
launcherImage string,
vmiInformer cache.SharedIndexInformer,
podInformer cache.SharedIndexInformer,
migrationInformer cache.SharedIndexInformer,
kubeVirtInformer cache.SharedIndexInformer,
recorder record.EventRecorder,
clientset kubecli.KubevirtClient,
clusterConfig *virtconfig.ClusterConfig,
) (*WorkloadUpdateController, error) {
rl := workqueue.NewTypedMaxOfRateLimiter[string](
workqueue.NewTypedItemExponentialFailureRateLimiter[string](defaultThrottleInterval, 300*time.Second),
&workqueue.TypedBucketRateLimiter[string]{Limiter: rate.NewLimiter(rate.Every(defaultThrottleInterval), 1)},
)
c := &WorkloadUpdateController{
queue: workqueue.NewTypedRateLimitingQueueWithConfig[string](
rl,
workqueue.TypedRateLimitingQueueConfig[string]{Name: "virt-controller-workload-update"},
),
vmiStore: vmiInformer.GetStore(),
podIndexer: podInformer.GetIndexer(),
migrationIndexer: migrationInformer.GetIndexer(),
kubeVirtStore: kubeVirtInformer.GetStore(),
recorder: recorder,
clientset: clientset,
launcherImage: launcherImage,
migrationExpectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
clusterConfig: clusterConfig,
hasSynced: func() bool {
return migrationInformer.HasSynced() && vmiInformer.HasSynced() && podInformer.HasSynced() && kubeVirtInformer.HasSynced()
},
}
_, err := vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: c.updateVmi,
})
if err != nil {
return nil, err
}
_, err = kubeVirtInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addKubeVirt,
DeleteFunc: c.deleteKubeVirt,
UpdateFunc: c.updateKubeVirt,
})
if err != nil {
return nil, err
}
_, err = migrationInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addMigration,
DeleteFunc: c.deleteMigration,
UpdateFunc: c.updateMigration,
})
if err != nil {
return nil, err
}
return c, nil
}
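// Illustrative wiring sketch (hypothetical names; an informer factory, event
// recorder, clientset and cluster config are assumed to already exist):
//
//	c, err := NewWorkloadUpdateController(launcherImage,
//	    factory.VMI(), factory.KubeVirtPod(), factory.Migration(), factory.KubeVirt(),
//	    recorder, clientset, clusterConfig)
//	if err != nil {
//	    return err
//	}
//	go c.Run(stopCh)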
func (c *WorkloadUpdateController) getKubeVirtKey() (string, error) {
kvs := c.kubeVirtStore.List()
if len(kvs) > 1 {
log.Log.Errorf("More than one KubeVirt custom resource detected: %v", len(kvs))
return "", fmt.Errorf("more than one KubeVirt custom resource detected: %v", len(kvs))
}
if len(kvs) == 1 {
kv := kvs[0].(*virtv1.KubeVirt)
return controller.KeyFunc(kv)
}
return "", nil
}
func (c *WorkloadUpdateController) addMigration(obj interface{}) {
migration, ok := obj.(*virtv1.VirtualMachineInstanceMigration)
if !ok {
return
}
key, err := c.getKubeVirtKey()
if key == "" || err != nil {
return
}
if migration.Annotations != nil {
// only observe the migration expectation if our controller created it
_, ok = migration.Annotations[virtv1.WorkloadUpdateMigrationAnnotation]
if ok {
c.migrationExpectations.CreationObserved(key)
}
}
c.queue.AddAfter(key, defaultThrottleInterval)
}
func (c *WorkloadUpdateController) deleteMigration(_ interface{}) {
key, err := c.getKubeVirtKey()
if key == "" || err != nil {
return
}
c.queue.AddAfter(key, defaultThrottleInterval)
}
func (c *WorkloadUpdateController) updateMigration(_, _ interface{}) {
key, err := c.getKubeVirtKey()
if key == "" || err != nil {
return
}
c.queue.AddAfter(key, defaultThrottleInterval)
}
func (c *WorkloadUpdateController) updateVmi(_, obj interface{}) {
vmi, ok := obj.(*virtv1.VirtualMachineInstance)
if !ok {
return
}
key, err := c.getKubeVirtKey()
if key == "" || err != nil {
return
}
if vmi.IsFinal() {
return
}
if !(isHotplugInProgress(vmi) || isVolumesUpdateInProgress(vmi)) ||
migrationutils.IsMigrating(vmi) {
return
}
c.queue.AddAfter(key, defaultThrottleInterval)
}
func (c *WorkloadUpdateController) addKubeVirt(obj interface{}) {
c.enqueueKubeVirt(obj)
}
func (c *WorkloadUpdateController) deleteKubeVirt(obj interface{}) {
c.enqueueKubeVirt(obj)
}
func (c *WorkloadUpdateController) updateKubeVirt(_, curr interface{}) {
c.enqueueKubeVirt(curr)
}
func (c *WorkloadUpdateController) enqueueKubeVirt(obj interface{}) {
logger := log.Log
kv, ok := obj.(*virtv1.KubeVirt)
if !ok {
return
}
key, err := controller.KeyFunc(kv)
if err != nil {
logger.Object(kv).Reason(err).Error("Failed to extract key from KubeVirt.")
return
}
c.queue.AddAfter(key, defaultThrottleInterval)
}
// Run starts the workload update controller and blocks until stopCh is closed.
func (c *WorkloadUpdateController) Run(stopCh <-chan struct{}) {
defer controller.HandlePanic()
defer c.queue.ShutDown()
log.Log.Info("Starting workload update controller.")
// This is hardcoded because there's no reason to make thread count
// configurable. The queue keys off the KubeVirt install object, and
// there can only be a single one of these in a cluster at a time.
threadiness := 1
// Wait for cache sync before we start the controller
cache.WaitForCacheSync(stopCh, c.hasSynced)
// Start the actual work
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
log.Log.Info("Stopping workload update controller.")
}
func (c *WorkloadUpdateController) runWorker() {
for c.Execute() {
}
}
func (c *WorkloadUpdateController) Execute() bool {
key, quit := c.queue.Get()
if quit {
return false
}
defer c.queue.Done(key)
err := c.execute(key)
if err != nil {
log.Log.Reason(err).Infof("reenqueuing workload updates for KubeVirt %v", key)
c.queue.AddRateLimited(key)
} else {
log.Log.V(4).Infof("processed workload updates for KubeVirt %v", key)
c.queue.Forget(key)
}
return true
}
func (c *WorkloadUpdateController) isOutdated(vmi *virtv1.VirtualMachineInstance) bool {
if vmi.IsFinal() {
return false
}
// if the launcher image isn't detected yet, that means
// we don't know what the launcher image is yet.
// This could be due to a migration, or the VMI is still
// initializing. virt-controller will set it for us once
// the VMI is either running or done migrating.
if vmi.Status.LauncherContainerImageVersion == "" {
return false
} else if vmi.Status.LauncherContainerImageVersion != c.launcherImage {
return true
}
return false
}
func isHotplugInProgress(vmi *virtv1.VirtualMachineInstance) bool {
condManager := controller.NewVirtualMachineInstanceConditionManager()
return condManager.HasCondition(vmi, virtv1.VirtualMachineInstanceVCPUChange) ||
condManager.HasConditionWithStatus(vmi, virtv1.VirtualMachineInstanceMemoryChange, k8sv1.ConditionTrue) ||
condManager.HasConditionWithStatus(vmi, virtv1.VirtualMachineInstanceMigrationRequired, k8sv1.ConditionTrue)
}
func isVolumesUpdateInProgress(vmi *virtv1.VirtualMachineInstance) bool {
return controller.NewVirtualMachineInstanceConditionManager().HasConditionWithStatus(vmi,
virtv1.VirtualMachineInstanceVolumesChange, k8sv1.ConditionTrue)
}
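// doesRequireMigration reports whether the VMI needs a live migration to
// apply an in-progress CPU/memory hotplug or volume update. It returns false
// if the VMI is final, already migrating, or carries the workload-update
// migration abortion annotation.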
func (c *WorkloadUpdateController) doesRequireMigration(vmi *virtv1.VirtualMachineInstance) bool {
if vmi.IsFinal() || migrationutils.IsMigrating(vmi) {
return false
}
if metav1.HasAnnotation(vmi.ObjectMeta, v1.WorkloadUpdateMigrationAbortionAnnotation) {
return false
}
if isHotplugInProgress(vmi) {
return true
}
if isVolumesUpdateInProgress(vmi) {
return true
}
return false
}
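// shouldAbortMigration reports whether in-flight workload-update migrations
// for the VMI should be cancelled: always when the abortion annotation is
// set, otherwise only when no hotplug or volume update still needs the
// migration and the target domain has not become ready yet.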
func (c *WorkloadUpdateController) shouldAbortMigration(vmi *virtv1.VirtualMachineInstance) bool {
numMig := len(migrationutils.ListWorkloadUpdateMigrations(c.migrationIndexer, vmi.Name, vmi.Namespace))
if metav1.HasAnnotation(vmi.ObjectMeta, virtv1.WorkloadUpdateMigrationAbortionAnnotation) {
return numMig > 0
}
if isHotplugInProgress(vmi) {
return false
}
if isVolumesUpdateInProgress(vmi) {
return false
}
if vmi.Status.MigrationState != nil && vmi.Status.MigrationState.TargetNodeDomainReadyTimestamp != nil {
return false
}
return numMig > 0
}
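// getUpdateData classifies the cached VMIs into the outdated, migratable,
// evictable and abort-change buckets consumed by sync, honoring the
// configured workload update methods (LiveMigrate and/or Evict).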
func (c *WorkloadUpdateController) getUpdateData(kv *virtv1.KubeVirt) *updateData {
data := &updateData{}
lookup := make(map[string]bool)
migrations := migrationutils.ListUnfinishedMigrations(c.migrationIndexer)
for _, migration := range migrations {
lookup[migration.Namespace+"/"+migration.Spec.VMIName] = true
}
automatedMigrationAllowed := false
automatedShutdownAllowed := false
for _, method := range kv.Spec.WorkloadUpdateStrategy.WorkloadUpdateMethods {
if method == virtv1.WorkloadUpdateMethodLiveMigrate {
automatedMigrationAllowed = true
} else if method == virtv1.WorkloadUpdateMethodEvict {
automatedShutdownAllowed = true
}
}
runningMigrations := migrationutils.FilterRunningMigrations(migrations)
data.numActiveMigrations = len(runningMigrations)
objs := c.vmiStore.List()
for _, obj := range objs {
vmi := obj.(*virtv1.VirtualMachineInstance)
switch {
case !vmi.IsRunning() || vmi.IsFinal() || vmi.DeletionTimestamp != nil:
// only consider running VMIs that aren't being shut down
continue
case c.shouldAbortMigration(vmi) && !c.isOutdated(vmi):
data.abortChangeVMIs = append(data.abortChangeVMIs, vmi)
continue
case !c.isOutdated(vmi) && !c.doesRequireMigration(vmi):
continue
}
data.allOutdatedVMIs = append(data.allOutdatedVMIs, vmi)
// Don't consider VMIs with in-flight migrations as migratable for our dataset.
// While a migrating workload still counts towards the allOutdatedVMIs list,
// we don't want to add it to any of the lists that result in actions being
// performed on it.
if migrationutils.IsMigrating(vmi) {
continue
} else if exists := lookup[vmi.Namespace+"/"+vmi.Name]; exists {
continue
}
volMig := false
errValid := volumemig.ValidateVolumesUpdateMigration(vmi, nil, vmi.Status.MigratedVolumes)
if len(vmi.Status.MigratedVolumes) > 0 && errValid == nil {
volMig = true
}
if automatedMigrationAllowed && (vmi.IsMigratable() || volMig) {
data.migratableOutdatedVMIs = append(data.migratableOutdatedVMIs, vmi)
} else if automatedShutdownAllowed {
data.evictOutdatedVMIs = append(data.evictOutdatedVMIs, vmi)
}
}
return data
}
func (c *WorkloadUpdateController) execute(key string) error {
obj, exists, err := c.kubeVirtStore.GetByKey(key)
if err != nil {
return err
} else if !exists {
c.migrationExpectations.DeleteExpectations(key)
return nil
}
// don't process anything until expectations are satisfied
// this ensures we don't do things like creating multiple
// migrations for the same vmi
if !c.migrationExpectations.SatisfiedExpectations(key) {
return nil
}
kv := obj.(*virtv1.KubeVirt)
// don't update workloads unless the infra is completely deployed and not updating
if kv.Status.Phase != virtv1.KubeVirtPhaseDeployed {
return nil
} else if kv.Status.ObservedDeploymentID != kv.Status.TargetDeploymentID {
return nil
}
return c.sync(kv)
}
func (c *WorkloadUpdateController) sync(kv *virtv1.KubeVirt) error {
data := c.getUpdateData(kv)
key, err := controller.KeyFunc(kv)
if err != nil {
return err
}
metrics.SetOutdatedVirtualMachineInstanceWorkloads(len(data.allOutdatedVMIs))
// update outdated workload count on kv
if kv.Status.OutdatedVirtualMachineInstanceWorkloads == nil || *kv.Status.OutdatedVirtualMachineInstanceWorkloads != len(data.allOutdatedVMIs) {
l := len(data.allOutdatedVMIs)
kvCopy := kv.DeepCopy()
kvCopy.Status.OutdatedVirtualMachineInstanceWorkloads = &l
patchSet := patch.New()
if kv.Status.OutdatedVirtualMachineInstanceWorkloads == nil {
patchSet.AddOption(patch.WithAdd("/status/outdatedVirtualMachineInstanceWorkloads", kvCopy.Status.OutdatedVirtualMachineInstanceWorkloads))
} else {
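// The test op makes the replace conditional: if another client changed the
// value since we read it, the API server rejects the whole patch and we
// retry on a later sync.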
patchSet.AddOption(
patch.WithTest("/status/outdatedVirtualMachineInstanceWorkloads", kv.Status.OutdatedVirtualMachineInstanceWorkloads),
patch.WithReplace("/status/outdatedVirtualMachineInstanceWorkloads", kvCopy.Status.OutdatedVirtualMachineInstanceWorkloads),
)
}
patchBytes, err := patchSet.GeneratePayload()
if err != nil {
return err
}
_, err = c.clientset.KubeVirt(kv.Namespace).PatchStatus(context.Background(), kv.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("unable to patch kubevirt obj status to update the outdatedVirtualMachineInstanceWorkloads valued: %v", err)
}
}
// Rather than enqueuing based on VMI activity, we keep periodically popping the loop
// until all VMIs are updated. Watching all VMI activity is chatty for this controller
// when we don't need to be that efficient in how quickly the updates are being processed.
if len(data.evictOutdatedVMIs) != 0 || len(data.migratableOutdatedVMIs) != 0 || len(data.abortChangeVMIs) != 0 {
c.queue.AddAfter(key, periodicReEnqueueIntervalSeconds*time.Second)
}
// Randomize the list so we don't always re-attempt the same VMIs in
// the event that some are having difficulty being relocated.
rand.Shuffle(len(data.migratableOutdatedVMIs), func(i, j int) {
data.migratableOutdatedVMIs[i], data.migratableOutdatedVMIs[j] = data.migratableOutdatedVMIs[j], data.migratableOutdatedVMIs[i]
})
batchDeletionInterval := time.Duration(defaultBatchDeletionIntervalSeconds) * time.Second
batchDeletionCount := defaultBatchDeletionCount
if kv.Spec.WorkloadUpdateStrategy.BatchEvictionSize != nil {
batchDeletionCount = *kv.Spec.WorkloadUpdateStrategy.BatchEvictionSize
}
if kv.Spec.WorkloadUpdateStrategy.BatchEvictionInterval != nil {
batchDeletionInterval = kv.Spec.WorkloadUpdateStrategy.BatchEvictionInterval.Duration
}
now := time.Now()
nextBatch := c.lastDeletionBatch.Add(batchDeletionInterval)
if now.After(nextBatch) && len(data.evictOutdatedVMIs) > 0 {
batchDeletionCount = int(math.Min(float64(batchDeletionCount), float64(len(data.evictOutdatedVMIs))))
c.lastDeletionBatch = now
} else {
batchDeletionCount = 0
}
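// With the defaults above, at most defaultBatchDeletionCount (10) VMIs are
// evicted per defaultBatchDeletionIntervalSeconds (60s) window; between
// windows batchDeletionCount is zero, so this pass evicts nothing.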
// This is a best effort attempt at not creating a bunch of pending migrations
// in the event that we've hit the global max. This check isn't meant to prevent
// overloading the cluster. The migration controller handles that. We're merely
// optimizing here by not introducing new migration objects we know can't be processed
// right now.
maxParallelMigrations := int(*c.clusterConfig.GetMigrationConfiguration().ParallelMigrationsPerCluster)
maxNewMigrations := maxParallelMigrations - data.numActiveMigrations
if maxNewMigrations < 0 {
maxNewMigrations = 0
}
migrateCount := int(math.Min(float64(maxNewMigrations), float64(len(data.migratableOutdatedVMIs))))
var migrationCandidates []*virtv1.VirtualMachineInstance
if migrateCount > 0 {
migrationCandidates = data.migratableOutdatedVMIs[0:migrateCount]
}
var evictionCandidates []*virtv1.VirtualMachineInstance
if batchDeletionCount > 0 {
evictionCandidates = data.evictOutdatedVMIs[0:batchDeletionCount]
}
wgLen := len(migrationCandidates) + len(evictionCandidates) + len(data.abortChangeVMIs)
wg := &sync.WaitGroup{}
wg.Add(wgLen)
errChan := make(chan error, wgLen)
c.migrationExpectations.ExpectCreations(key, migrateCount)
for _, vmi := range migrationCandidates {
go func(vmi *virtv1.VirtualMachineInstance) {
var labels map[string]string
if isVolumesUpdateInProgress(vmi) {
labels = make(map[string]string)
labels[virtv1.VolumesUpdateMigration] = vmi.Name
if len(vmi.Name) > k8svalidation.DNS1035LabelMaxLength {
// Label values are limited to 63 characters; fall back to the UID for longer names and remain backwards compatible otherwise
labels[virtv1.VolumesUpdateMigration] = string(vmi.UID)
}
}
defer wg.Done()
wuMigration := &virtv1.VirtualMachineInstanceMigration{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
virtv1.WorkloadUpdateMigrationAnnotation: "",
},
Labels: labels,
GenerateName: "kubevirt-workload-update-",
},
Spec: virtv1.VirtualMachineInstanceMigrationSpec{
VMIName: vmi.Name,
},
}
if c.clusterConfig.MigrationPriorityQueueEnabled() {
// Workload-update (upgrade) migrations default to system-critical priority.
priority := v1.PrioritySystemCritical
if isHotplugInProgress(vmi) || isVolumesUpdateInProgress(vmi) {
priority = v1.PriorityUserTriggered
}
wuMigration.Spec.Priority = &priority
}
createdMigration, err := c.clientset.VirtualMachineInstanceMigration(vmi.Namespace).Create(context.Background(), wuMigration, metav1.CreateOptions{})
if err != nil {
log.Log.Object(vmi).Reason(err).Errorf("Failed to migrate vmi as part of workload update")
c.migrationExpectations.CreationObserved(key)
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, FailedCreateVirtualMachineInstanceMigrationReason, "Error creating a Migration for automated workload update: %v", err)
errChan <- err
return
} else {
log.Log.Object(vmi).Infof("Initiated migration of vmi as part of workload update")
c.recorder.Eventf(vmi, k8sv1.EventTypeNormal, SuccessfulCreateVirtualMachineInstanceMigrationReason, "Created Migration %s for automated workload update", createdMigration.Name)
}
}(vmi)
}
for _, vmi := range evictionCandidates {
go func(vmi *virtv1.VirtualMachineInstance) {
defer wg.Done()
pod, err := controller.CurrentVMIPod(vmi, c.podIndexer)
if err != nil {
log.Log.Object(vmi).Reason(err).Errorf("Failed to detect active pod for vmi during workload update")
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, FailedEvictVirtualMachineInstanceReason, "Error detecting active pod for VMI during workload update: %v", err)
errChan <- err
return
}
err = c.clientset.CoreV1().Pods(vmi.Namespace).EvictV1beta1(context.Background(),
&policy.Eviction{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
},
DeleteOptions: &metav1.DeleteOptions{},
})
if err != nil && !errors.IsNotFound(err) {
log.Log.Object(vmi).Reason(err).Errorf("Failed to evict vmi as part of workload update")
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, FailedEvictVirtualMachineInstanceReason, "Error deleting VMI during automated workload update: %v", err)
errChan <- err
} else {
log.Log.Object(vmi).Infof("Evicted vmi pod as part of workload update")
c.recorder.Eventf(vmi, k8sv1.EventTypeNormal, SuccessfulEvictVirtualMachineInstanceReason, "Initiated eviction of VMI as part of automated workload update")
}
}(vmi)
}
for _, vmi := range data.abortChangeVMIs {
go func(vmi *virtv1.VirtualMachineInstance) {
defer wg.Done()
migList := migrationutils.ListWorkloadUpdateMigrations(c.migrationIndexer, vmi.Name, vmi.Namespace)
for _, mig := range migList {
err := c.clientset.VirtualMachineInstanceMigration(vmi.Namespace).Delete(context.Background(), mig.Name, metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
log.Log.Object(vmi).Reason(err).Errorf("Failed to delete the migration while aborting a change")
c.recorder.Eventf(vmi, k8sv1.EventTypeWarning, FailedChangeAbortionReason, "Failed to abort change for vmi: %s: %v", vmi.Name, err)
errChan <- err
} else if err == nil {
log.Log.Infof("Deleted migration %s due to an update change abortion", mig.Name)
c.recorder.Eventf(vmi, k8sv1.EventTypeNormal, SuccessfulChangeAbortionReason, "Aborted change for vmi: %s", vmi.Name)
}
}
}
}(vmi)
}
wg.Wait()
select {
case err := <-errChan:
return err
default:
}
return nil
}
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright the KubeVirt Authors.
*
*/
// This file is built on all arches. Go only filters files ending with _<arch>.go
package archdefaulter
// Ensure that there is a compile error should the struct not implement the ArchDefaulter interface anymore.
var _ = ArchDefaulter(&defaulterAMD64{})
type defaulterAMD64 struct{}
func (defaulterAMD64) OSTypeArch() string {
return "x86_64"
}
func (defaulterAMD64) OSTypeMachine() string {
// q35 is an alias of the newest q35 machine type.
return "q35"
}
func (defaulterAMD64) DeepCopy() ArchDefaulter {
return defaulterAMD64{}
}
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright the KubeVirt Authors.
*
*/
package archdefaulter
import "kubevirt.io/client-go/log"
type ArchDefaulter interface {
OSTypeArch() string
OSTypeMachine() string
DeepCopy() ArchDefaulter
}
func NewArchDefaulter(arch string) ArchDefaulter {
switch arch {
case "arm64":
return defaulterARM64{}
case "s390x":
return defaulterS390X{}
case "amd64":
return defaulterAMD64{}
default:
log.Log.Warning("Trying to create an arch defaulter from an unknown arch: " + arch + ". Falling back to AMD64")
return defaulterAMD64{}
}
}
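// Illustrative sketch of the mapping, e.g.:
//
//	d := NewArchDefaulter("arm64")
//	_ = d.OSTypeArch()    // "aarch64"
//	_ = d.OSTypeMachine() // "virt"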
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright the KubeVirt Authors.
*
*/
// This file is built on all arches. Go only filters files ending with _<arch>.go
package archdefaulter
// Ensure that there is a compile error should the struct not implement the ArchDefaulter interface anymore.
var _ = ArchDefaulter(&defaulterARM64{})
type defaulterARM64 struct{}
func (defaulterARM64) OSTypeArch() string {
return "aarch64"
}
func (defaulterARM64) OSTypeMachine() string {
return "virt"
}
func (defaulterARM64) DeepCopy() ArchDefaulter {
return defaulterARM64{}
}
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright the KubeVirt Authors.
*
*/
// This file is built on all arches. Go only filters files ending with _<arch>.go
package archdefaulter
// Ensure that there is a compile error should the struct not implement the ArchDefaulter interface anymore.
var _ = ArchDefaulter(&defaulterS390X{})
type defaulterS390X struct{}
func (defaulterS390X) OSTypeArch() string {
return "s390x"
}
func (defaulterS390X) OSTypeMachine() string {
return "s390-ccw-virtio"
}
func (defaulterS390X) DeepCopy() ArchDefaulter {
return defaulterS390X{}
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
This file is part of the KubeVirt project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Copyright The KubeVirt Authors.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package api
import (
runtime "k8s.io/apimachinery/pkg/runtime"
v1 "kubevirt.io/api/core/v1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ACPI) DeepCopyInto(out *ACPI) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACPI.
func (in *ACPI) DeepCopy() *ACPI {
if in == nil {
return nil
}
out := new(ACPI)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ACPITable) DeepCopyInto(out *ACPITable) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACPITable.
func (in *ACPITable) DeepCopy() *ACPITable {
if in == nil {
return nil
}
out := new(ACPITable)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AccessCredentialMetadata) DeepCopyInto(out *AccessCredentialMetadata) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessCredentialMetadata.
func (in *AccessCredentialMetadata) DeepCopy() *AccessCredentialMetadata {
if in == nil {
return nil
}
out := new(AccessCredentialMetadata)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Address) DeepCopyInto(out *Address) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Address.
func (in *Address) DeepCopy() *Address {
if in == nil {
return nil
}
out := new(Address)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Alias) DeepCopyInto(out *Alias) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Alias.
func (in *Alias) DeepCopy() *Alias {
if in == nil {
return nil
}
out := new(Alias)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Arg) DeepCopyInto(out *Arg) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Arg.
func (in *Arg) DeepCopy() *Arg {
if in == nil {
return nil
}
out := new(Arg)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BIOS) DeepCopyInto(out *BIOS) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BIOS.
func (in *BIOS) DeepCopy() *BIOS {
if in == nil {
return nil
}
out := new(BIOS)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackingStore) DeepCopyInto(out *BackingStore) {
*out = *in
if in.Format != nil {
in, out := &in.Format, &out.Format
*out = new(BackingStoreFormat)
**out = **in
}
if in.Source != nil {
in, out := &in.Source, &out.Source
*out = new(DiskSource)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackingStore.
func (in *BackingStore) DeepCopy() *BackingStore {
if in == nil {
return nil
}
out := new(BackingStore)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackingStoreFormat) DeepCopyInto(out *BackingStoreFormat) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackingStoreFormat.
func (in *BackingStoreFormat) DeepCopy() *BackingStoreFormat {
if in == nil {
return nil
}
out := new(BackingStoreFormat)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupDisk) DeepCopyInto(out *BackupDisk) {
*out = *in
if in.Target != nil {
in, out := &in.Target, &out.Target
*out = new(BackupTarget)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupDisk.
func (in *BackupDisk) DeepCopy() *BackupDisk {
if in == nil {
return nil
}
out := new(BackupDisk)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupDisks) DeepCopyInto(out *BackupDisks) {
*out = *in
if in.Disks != nil {
in, out := &in.Disks, &out.Disks
*out = make([]BackupDisk, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupDisks.
func (in *BackupDisks) DeepCopy() *BackupDisks {
if in == nil {
return nil
}
out := new(BackupDisks)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupMetadata) DeepCopyInto(out *BackupMetadata) {
*out = *in
if in.StartTimestamp != nil {
in, out := &in.StartTimestamp, &out.StartTimestamp
*out = (*in).DeepCopy()
}
if in.EndTimestamp != nil {
in, out := &in.EndTimestamp, &out.EndTimestamp
*out = (*in).DeepCopy()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupMetadata.
func (in *BackupMetadata) DeepCopy() *BackupMetadata {
if in == nil {
return nil
}
out := new(BackupMetadata)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupTarget) DeepCopyInto(out *BackupTarget) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupTarget.
func (in *BackupTarget) DeepCopy() *BackupTarget {
if in == nil {
return nil
}
out := new(BackupTarget)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BandWidth) DeepCopyInto(out *BandWidth) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BandWidth.
func (in *BandWidth) DeepCopy() *BandWidth {
if in == nil {
return nil
}
out := new(BandWidth)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BlockIO) DeepCopyInto(out *BlockIO) {
*out = *in
if in.DiscardGranularity != nil {
in, out := &in.DiscardGranularity, &out.DiscardGranularity
*out = new(uint)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockIO.
func (in *BlockIO) DeepCopy() *BlockIO {
if in == nil {
return nil
}
out := new(BlockIO)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Boot) DeepCopyInto(out *Boot) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Boot.
func (in *Boot) DeepCopy() *Boot {
if in == nil {
return nil
}
out := new(Boot)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BootMenu) DeepCopyInto(out *BootMenu) {
*out = *in
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(uint)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootMenu.
func (in *BootMenu) DeepCopy() *BootMenu {
if in == nil {
return nil
}
out := new(BootMenu)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BootOrder) DeepCopyInto(out *BootOrder) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootOrder.
func (in *BootOrder) DeepCopy() *BootOrder {
if in == nil {
return nil
}
out := new(BootOrder)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CID) DeepCopyInto(out *CID) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CID.
func (in *CID) DeepCopy() *CID {
if in == nil {
return nil
}
out := new(CID)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CPU) DeepCopyInto(out *CPU) {
*out = *in
if in.Features != nil {
in, out := &in.Features, &out.Features
*out = make([]CPUFeature, len(*in))
copy(*out, *in)
}
if in.Topology != nil {
in, out := &in.Topology, &out.Topology
*out = new(CPUTopology)
**out = **in
}
if in.NUMA != nil {
in, out := &in.NUMA, &out.NUMA
*out = new(NUMA)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPU.
func (in *CPU) DeepCopy() *CPU {
if in == nil {
return nil
}
out := new(CPU)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CPUEmulatorPin) DeepCopyInto(out *CPUEmulatorPin) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPUEmulatorPin.
func (in *CPUEmulatorPin) DeepCopy() *CPUEmulatorPin {
if in == nil {
return nil
}
out := new(CPUEmulatorPin)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CPUFeature) DeepCopyInto(out *CPUFeature) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPUFeature.
func (in *CPUFeature) DeepCopy() *CPUFeature {
if in == nil {
return nil
}
out := new(CPUFeature)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CPUTopology) DeepCopyInto(out *CPUTopology) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPUTopology.
func (in *CPUTopology) DeepCopy() *CPUTopology {
if in == nil {
return nil
}
out := new(CPUTopology)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CPUTune) DeepCopyInto(out *CPUTune) {
*out = *in
if in.VCPUPin != nil {
in, out := &in.VCPUPin, &out.VCPUPin
*out = make([]CPUTuneVCPUPin, len(*in))
copy(*out, *in)
}
if in.IOThreadPin != nil {
in, out := &in.IOThreadPin, &out.IOThreadPin
*out = make([]CPUTuneIOThreadPin, len(*in))
copy(*out, *in)
}
if in.EmulatorPin != nil {
in, out := &in.EmulatorPin, &out.EmulatorPin
*out = new(CPUEmulatorPin)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPUTune.
func (in *CPUTune) DeepCopy() *CPUTune {
if in == nil {
return nil
}
out := new(CPUTune)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CPUTuneIOThreadPin) DeepCopyInto(out *CPUTuneIOThreadPin) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPUTuneIOThreadPin.
func (in *CPUTuneIOThreadPin) DeepCopy() *CPUTuneIOThreadPin {
if in == nil {
return nil
}
out := new(CPUTuneIOThreadPin)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CPUTuneVCPUPin) DeepCopyInto(out *CPUTuneVCPUPin) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPUTuneVCPUPin.
func (in *CPUTuneVCPUPin) DeepCopy() *CPUTuneVCPUPin {
if in == nil {
return nil
}
out := new(CPUTuneVCPUPin)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Channel) DeepCopyInto(out *Channel) {
*out = *in
if in.Source != nil {
in, out := &in.Source, &out.Source
*out = new(ChannelSource)
**out = **in
}
if in.Target != nil {
in, out := &in.Target, &out.Target
*out = new(ChannelTarget)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Channel.
func (in *Channel) DeepCopy() *Channel {
if in == nil {
return nil
}
out := new(Channel)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChannelSource) DeepCopyInto(out *ChannelSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelSource.
func (in *ChannelSource) DeepCopy() *ChannelSource {
if in == nil {
return nil
}
out := new(ChannelSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChannelTarget) DeepCopyInto(out *ChannelTarget) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelTarget.
func (in *ChannelTarget) DeepCopy() *ChannelTarget {
if in == nil {
return nil
}
out := new(ChannelTarget)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CheckpointDisk) DeepCopyInto(out *CheckpointDisk) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckpointDisk.
func (in *CheckpointDisk) DeepCopy() *CheckpointDisk {
if in == nil {
return nil
}
out := new(CheckpointDisk)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CheckpointDisks) DeepCopyInto(out *CheckpointDisks) {
*out = *in
if in.Disks != nil {
in, out := &in.Disks, &out.Disks
*out = make([]CheckpointDisk, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckpointDisks.
func (in *CheckpointDisks) DeepCopy() *CheckpointDisks {
if in == nil {
return nil
}
out := new(CheckpointDisks)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CheckpointParent) DeepCopyInto(out *CheckpointParent) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckpointParent.
func (in *CheckpointParent) DeepCopy() *CheckpointParent {
if in == nil {
return nil
}
out := new(CheckpointParent)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Clock) DeepCopyInto(out *Clock) {
*out = *in
if in.Timer != nil {
in, out := &in.Timer, &out.Timer
*out = make([]Timer, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Clock.
func (in *Clock) DeepCopy() *Clock {
if in == nil {
return nil
}
out := new(Clock)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Commandline) DeepCopyInto(out *Commandline) {
*out = *in
if in.QEMUEnv != nil {
in, out := &in.QEMUEnv, &out.QEMUEnv
*out = make([]Env, len(*in))
copy(*out, *in)
}
if in.QEMUArg != nil {
in, out := &in.QEMUArg, &out.QEMUArg
*out = make([]Arg, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Commandline.
func (in *Commandline) DeepCopy() *Commandline {
if in == nil {
return nil
}
out := new(Commandline)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Console) DeepCopyInto(out *Console) {
*out = *in
if in.Target != nil {
in, out := &in.Target, &out.Target
*out = new(ConsoleTarget)
(*in).DeepCopyInto(*out)
}
if in.Source != nil {
in, out := &in.Source, &out.Source
*out = new(ConsoleSource)
**out = **in
}
if in.Alias != nil {
in, out := &in.Alias, &out.Alias
*out = new(Alias)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Console.
func (in *Console) DeepCopy() *Console {
if in == nil {
return nil
}
out := new(Console)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConsoleSource) DeepCopyInto(out *ConsoleSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSource.
func (in *ConsoleSource) DeepCopy() *ConsoleSource {
if in == nil {
return nil
}
out := new(ConsoleSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConsoleTarget) DeepCopyInto(out *ConsoleTarget) {
*out = *in
if in.Type != nil {
in, out := &in.Type, &out.Type
*out = new(string)
**out = **in
}
if in.Port != nil {
in, out := &in.Port, &out.Port
*out = new(uint)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleTarget.
func (in *ConsoleTarget) DeepCopy() *ConsoleTarget {
if in == nil {
return nil
}
out := new(ConsoleTarget)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Controller) DeepCopyInto(out *Controller) {
*out = *in
if in.Driver != nil {
in, out := &in.Driver, &out.Driver
*out = new(ControllerDriver)
(*in).DeepCopyInto(*out)
}
if in.Alias != nil {
in, out := &in.Alias, &out.Alias
*out = new(Alias)
**out = **in
}
if in.Address != nil {
in, out := &in.Address, &out.Address
*out = new(Address)
**out = **in
}
if in.PCIHole64 != nil {
in, out := &in.PCIHole64, &out.PCIHole64
*out = new(PCIHole64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Controller.
func (in *Controller) DeepCopy() *Controller {
if in == nil {
return nil
}
out := new(Controller)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerDriver) DeepCopyInto(out *ControllerDriver) {
*out = *in
if in.IOThread != nil {
in, out := &in.IOThread, &out.IOThread
*out = new(uint)
**out = **in
}
if in.Queues != nil {
in, out := &in.Queues, &out.Queues
*out = new(uint)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerDriver.
func (in *ControllerDriver) DeepCopy() *ControllerDriver {
if in == nil {
return nil
}
out := new(ControllerDriver)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataStore) DeepCopyInto(out *DataStore) {
*out = *in
if in.Format != nil {
in, out := &in.Format, &out.Format
*out = new(DataStoreFormat)
**out = **in
}
if in.Source != nil {
in, out := &in.Source, &out.Source
*out = new(DiskSource)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataStore.
func (in *DataStore) DeepCopy() *DataStore {
if in == nil {
return nil
}
out := new(DataStore)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataStoreFormat) DeepCopyInto(out *DataStoreFormat) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataStoreFormat.
func (in *DataStoreFormat) DeepCopy() *DataStoreFormat {
if in == nil {
return nil
}
out := new(DataStoreFormat)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Defaulter) DeepCopyInto(out *Defaulter) {
*out = *in
out.ArchDefaulter = in.ArchDefaulter.DeepCopy()
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Defaulter.
func (in *Defaulter) DeepCopy() *Defaulter {
if in == nil {
return nil
}
out := new(Defaulter)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Devices) DeepCopyInto(out *Devices) {
*out = *in
if in.Interfaces != nil {
in, out := &in.Interfaces, &out.Interfaces
*out = make([]Interface, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Channels != nil {
in, out := &in.Channels, &out.Channels
*out = make([]Channel, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.HostDevices != nil {
in, out := &in.HostDevices, &out.HostDevices
*out = make([]HostDevice, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.PanicDevices != nil {
in, out := &in.PanicDevices, &out.PanicDevices
*out = make([]PanicDevice, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Controllers != nil {
in, out := &in.Controllers, &out.Controllers
*out = make([]Controller, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Video != nil {
in, out := &in.Video, &out.Video
*out = make([]Video, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Graphics != nil {
in, out := &in.Graphics, &out.Graphics
*out = make([]Graphics, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Ballooning != nil {
in, out := &in.Ballooning, &out.Ballooning
*out = new(MemBalloon)
(*in).DeepCopyInto(*out)
}
if in.Disks != nil {
in, out := &in.Disks, &out.Disks
*out = make([]Disk, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Inputs != nil {
in, out := &in.Inputs, &out.Inputs
*out = make([]Input, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Serials != nil {
in, out := &in.Serials, &out.Serials
*out = make([]Serial, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Consoles != nil {
in, out := &in.Consoles, &out.Consoles
*out = make([]Console, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Watchdogs != nil {
in, out := &in.Watchdogs, &out.Watchdogs
*out = make([]Watchdog, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Rng != nil {
in, out := &in.Rng, &out.Rng
*out = new(Rng)
(*in).DeepCopyInto(*out)
}
if in.Filesystems != nil {
in, out := &in.Filesystems, &out.Filesystems
*out = make([]FilesystemDevice, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Redirs != nil {
in, out := &in.Redirs, &out.Redirs
*out = make([]RedirectedDevice, len(*in))
copy(*out, *in)
}
if in.SoundCards != nil {
in, out := &in.SoundCards, &out.SoundCards
*out = make([]SoundCard, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.TPMs != nil {
in, out := &in.TPMs, &out.TPMs
*out = make([]TPM, len(*in))
copy(*out, *in)
}
if in.VSOCK != nil {
in, out := &in.VSOCK, &out.VSOCK
*out = new(VSOCK)
**out = **in
}
if in.Memory != nil {
in, out := &in.Memory, &out.Memory
*out = new(MemoryDevice)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Devices.
func (in *Devices) DeepCopy() *Devices {
if in == nil {
return nil
}
out := new(Devices)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Disk) DeepCopyInto(out *Disk) {
*out = *in
in.Source.DeepCopyInto(&out.Source)
out.Target = in.Target
if in.Driver != nil {
in, out := &in.Driver, &out.Driver
*out = new(DiskDriver)
(*in).DeepCopyInto(*out)
}
if in.ReadOnly != nil {
in, out := &in.ReadOnly, &out.ReadOnly
*out = new(ReadOnly)
**out = **in
}
if in.Auth != nil {
in, out := &in.Auth, &out.Auth
*out = new(DiskAuth)
(*in).DeepCopyInto(*out)
}
if in.Alias != nil {
in, out := &in.Alias, &out.Alias
*out = new(Alias)
**out = **in
}
if in.BackingStore != nil {
in, out := &in.BackingStore, &out.BackingStore
*out = new(BackingStore)
(*in).DeepCopyInto(*out)
}
if in.BootOrder != nil {
in, out := &in.BootOrder, &out.BootOrder
*out = new(BootOrder)
**out = **in
}
if in.Address != nil {
in, out := &in.Address, &out.Address
*out = new(Address)
**out = **in
}
if in.BlockIO != nil {
in, out := &in.BlockIO, &out.BlockIO
*out = new(BlockIO)
(*in).DeepCopyInto(*out)
}
if in.FilesystemOverhead != nil {
in, out := &in.FilesystemOverhead, &out.FilesystemOverhead
*out = new(v1.Percent)
**out = **in
}
if in.Capacity != nil {
in, out := &in.Capacity, &out.Capacity
*out = new(int64)
**out = **in
}
if in.Shareable != nil {
in, out := &in.Shareable, &out.Shareable
*out = new(Shareable)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Disk.
func (in *Disk) DeepCopy() *Disk {
if in == nil {
return nil
}
out := new(Disk)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DiskAuth) DeepCopyInto(out *DiskAuth) {
*out = *in
if in.Secret != nil {
in, out := &in.Secret, &out.Secret
*out = new(DiskSecret)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskAuth.
func (in *DiskAuth) DeepCopy() *DiskAuth {
if in == nil {
return nil
}
out := new(DiskAuth)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DiskDriver) DeepCopyInto(out *DiskDriver) {
*out = *in
if in.IOThread != nil {
in, out := &in.IOThread, &out.IOThread
*out = new(uint)
**out = **in
}
if in.IOThreads != nil {
in, out := &in.IOThreads, &out.IOThreads
*out = new(DiskIOThreads)
(*in).DeepCopyInto(*out)
}
if in.Queues != nil {
in, out := &in.Queues, &out.Queues
*out = new(uint)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskDriver.
func (in *DiskDriver) DeepCopy() *DiskDriver {
if in == nil {
return nil
}
out := new(DiskDriver)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DiskIOThread) DeepCopyInto(out *DiskIOThread) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskIOThread.
func (in *DiskIOThread) DeepCopy() *DiskIOThread {
if in == nil {
return nil
}
out := new(DiskIOThread)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DiskIOThreads) DeepCopyInto(out *DiskIOThreads) {
*out = *in
if in.IOThread != nil {
in, out := &in.IOThread, &out.IOThread
*out = make([]DiskIOThread, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskIOThreads.
func (in *DiskIOThreads) DeepCopy() *DiskIOThreads {
if in == nil {
return nil
}
out := new(DiskIOThreads)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DiskSecret) DeepCopyInto(out *DiskSecret) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSecret.
func (in *DiskSecret) DeepCopy() *DiskSecret {
if in == nil {
return nil
}
out := new(DiskSecret)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DiskSource) DeepCopyInto(out *DiskSource) {
*out = *in
if in.Host != nil {
in, out := &in.Host, &out.Host
*out = new(DiskSourceHost)
**out = **in
}
if in.Reservations != nil {
in, out := &in.Reservations, &out.Reservations
*out = new(Reservations)
(*in).DeepCopyInto(*out)
}
if in.Slices != nil {
in, out := &in.Slices, &out.Slices
*out = make([]Slice, len(*in))
copy(*out, *in)
}
if in.DataStore != nil {
in, out := &in.DataStore, &out.DataStore
*out = new(DataStore)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSource.
func (in *DiskSource) DeepCopy() *DiskSource {
if in == nil {
return nil
}
out := new(DiskSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DiskSourceHost) DeepCopyInto(out *DiskSourceHost) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSourceHost.
func (in *DiskSourceHost) DeepCopy() *DiskSourceHost {
if in == nil {
return nil
}
out := new(DiskSourceHost)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DiskTarget) DeepCopyInto(out *DiskTarget) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskTarget.
func (in *DiskTarget) DeepCopy() *DiskTarget {
if in == nil {
return nil
}
out := new(DiskTarget)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Domain) DeepCopyInto(out *Domain) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Domain.
func (in *Domain) DeepCopy() *Domain {
if in == nil {
return nil
}
out := new(Domain)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Domain) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DomainBackup) DeepCopyInto(out *DomainBackup) {
*out = *in
out.XMLName = in.XMLName
if in.Incremental != nil {
in, out := &in.Incremental, &out.Incremental
*out = new(string)
**out = **in
}
if in.BackupDisks != nil {
in, out := &in.BackupDisks, &out.BackupDisks
*out = new(BackupDisks)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainBackup.
func (in *DomainBackup) DeepCopy() *DomainBackup {
if in == nil {
return nil
}
out := new(DomainBackup)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DomainCheckpoint) DeepCopyInto(out *DomainCheckpoint) {
*out = *in
out.XMLName = in.XMLName
if in.CheckpointDisks != nil {
in, out := &in.CheckpointDisks, &out.CheckpointDisks
*out = new(CheckpointDisks)
(*in).DeepCopyInto(*out)
}
if in.CreationTime != nil {
in, out := &in.CreationTime, &out.CreationTime
*out = new(uint64)
**out = **in
}
if in.Parent != nil {
in, out := &in.Parent, &out.Parent
*out = new(CheckpointParent)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainCheckpoint.
func (in *DomainCheckpoint) DeepCopy() *DomainCheckpoint {
if in == nil {
return nil
}
out := new(DomainCheckpoint)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DomainGuestInfo) DeepCopyInto(out *DomainGuestInfo) {
*out = *in
if in.Interfaces != nil {
in, out := &in.Interfaces, &out.Interfaces
*out = make([]InterfaceStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.OSInfo != nil {
in, out := &in.OSInfo, &out.OSInfo
*out = new(GuestOSInfo)
**out = **in
}
if in.FSFreezeStatus != nil {
in, out := &in.FSFreezeStatus, &out.FSFreezeStatus
*out = new(FSFreeze)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainGuestInfo.
func (in *DomainGuestInfo) DeepCopy() *DomainGuestInfo {
if in == nil {
return nil
}
out := new(DomainGuestInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DomainList) DeepCopyInto(out *DomainList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Domain, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
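// The Items loop above is the deep variant of slice copying: `copy(*out, *in)`
// is only emitted for element types with no pointers, and Domain carries
// pointer-rich Spec/Status, so each element needs its own DeepCopyInto call.
// Illustrative sketch of the difference (src is a placeholder []Domain):
//
//	dst := make([]Domain, len(src))
//	copy(dst, src)                   // insufficient here: elements still share pointers
//	for i := range src {
//		src[i].DeepCopyInto(&dst[i]) // correct: every nested pointer is re-allocated
//	}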
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainList.
func (in *DomainList) DeepCopy() *DomainList {
if in == nil {
return nil
}
out := new(DomainList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DomainList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DomainSpec) DeepCopyInto(out *DomainSpec) {
*out = *in
out.XMLName = in.XMLName
out.Memory = in.Memory
if in.CurrentMemory != nil {
in, out := &in.CurrentMemory, &out.CurrentMemory
*out = new(Memory)
**out = **in
}
if in.MaxMemory != nil {
in, out := &in.MaxMemory, &out.MaxMemory
*out = new(MaxMemory)
**out = **in
}
if in.MemoryBacking != nil {
in, out := &in.MemoryBacking, &out.MemoryBacking
*out = new(MemoryBacking)
(*in).DeepCopyInto(*out)
}
in.OS.DeepCopyInto(&out.OS)
if in.SysInfo != nil {
in, out := &in.SysInfo, &out.SysInfo
*out = new(SysInfo)
(*in).DeepCopyInto(*out)
}
in.Devices.DeepCopyInto(&out.Devices)
if in.Clock != nil {
in, out := &in.Clock, &out.Clock
*out = new(Clock)
(*in).DeepCopyInto(*out)
}
if in.Resource != nil {
in, out := &in.Resource, &out.Resource
*out = new(Resource)
**out = **in
}
if in.QEMUCmd != nil {
in, out := &in.QEMUCmd, &out.QEMUCmd
*out = new(Commandline)
(*in).DeepCopyInto(*out)
}
in.Metadata.DeepCopyInto(&out.Metadata)
if in.Features != nil {
in, out := &in.Features, &out.Features
*out = new(Features)
(*in).DeepCopyInto(*out)
}
in.CPU.DeepCopyInto(&out.CPU)
if in.VCPU != nil {
in, out := &in.VCPU, &out.VCPU
*out = new(VCPU)
**out = **in
}
if in.VCPUs != nil {
in, out := &in.VCPUs, &out.VCPUs
*out = new(VCPUs)
(*in).DeepCopyInto(*out)
}
if in.CPUTune != nil {
in, out := &in.CPUTune, &out.CPUTune
*out = new(CPUTune)
(*in).DeepCopyInto(*out)
}
if in.NUMATune != nil {
in, out := &in.NUMATune, &out.NUMATune
*out = new(NUMATune)
(*in).DeepCopyInto(*out)
}
if in.IOThreads != nil {
in, out := &in.IOThreads, &out.IOThreads
*out = new(IOThreads)
**out = **in
}
if in.LaunchSecurity != nil {
in, out := &in.LaunchSecurity, &out.LaunchSecurity
*out = new(LaunchSecurity)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSpec.
func (in *DomainSpec) DeepCopy() *DomainSpec {
if in == nil {
return nil
}
out := new(DomainSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DomainStatus) DeepCopyInto(out *DomainStatus) {
*out = *in
if in.Interfaces != nil {
in, out := &in.Interfaces, &out.Interfaces
*out = make([]InterfaceStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
out.OSInfo = in.OSInfo
out.FSFreezeStatus = in.FSFreezeStatus
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainStatus.
func (in *DomainStatus) DeepCopy() *DomainStatus {
if in == nil {
return nil
}
out := new(DomainStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DomainSysInfo) DeepCopyInto(out *DomainSysInfo) {
*out = *in
out.OSInfo = in.OSInfo
out.Timezone = in.Timezone
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSysInfo.
func (in *DomainSysInfo) DeepCopy() *DomainSysInfo {
if in == nil {
return nil
}
out := new(DomainSysInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Entry) DeepCopyInto(out *Entry) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Entry.
func (in *Entry) DeepCopy() *Entry {
if in == nil {
return nil
}
out := new(Entry)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Env) DeepCopyInto(out *Env) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Env.
func (in *Env) DeepCopy() *Env {
if in == nil {
return nil
}
out := new(Env)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FSDisk) DeepCopyInto(out *FSDisk) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSDisk.
func (in *FSDisk) DeepCopy() *FSDisk {
if in == nil {
return nil
}
out := new(FSDisk)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FSFreeze) DeepCopyInto(out *FSFreeze) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSFreeze.
func (in *FSFreeze) DeepCopy() *FSFreeze {
if in == nil {
return nil
}
out := new(FSFreeze)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FeatureEnabled) DeepCopyInto(out *FeatureEnabled) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureEnabled.
func (in *FeatureEnabled) DeepCopy() *FeatureEnabled {
if in == nil {
return nil
}
out := new(FeatureEnabled)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FeatureHyperv) DeepCopyInto(out *FeatureHyperv) {
*out = *in
if in.Relaxed != nil {
in, out := &in.Relaxed, &out.Relaxed
*out = new(FeatureState)
**out = **in
}
if in.VAPIC != nil {
in, out := &in.VAPIC, &out.VAPIC
*out = new(FeatureState)
**out = **in
}
if in.Spinlocks != nil {
in, out := &in.Spinlocks, &out.Spinlocks
*out = new(FeatureSpinlocks)
(*in).DeepCopyInto(*out)
}
if in.VPIndex != nil {
in, out := &in.VPIndex, &out.VPIndex
*out = new(FeatureState)
**out = **in
}
if in.Runtime != nil {
in, out := &in.Runtime, &out.Runtime
*out = new(FeatureState)
**out = **in
}
if in.SyNIC != nil {
in, out := &in.SyNIC, &out.SyNIC
*out = new(FeatureState)
**out = **in
}
if in.SyNICTimer != nil {
in, out := &in.SyNICTimer, &out.SyNICTimer
*out = new(SyNICTimer)
(*in).DeepCopyInto(*out)
}
if in.Reset != nil {
in, out := &in.Reset, &out.Reset
*out = new(FeatureState)
**out = **in
}
if in.VendorID != nil {
in, out := &in.VendorID, &out.VendorID
*out = new(FeatureVendorID)
**out = **in
}
if in.Frequencies != nil {
in, out := &in.Frequencies, &out.Frequencies
*out = new(FeatureState)
**out = **in
}
if in.Reenlightenment != nil {
in, out := &in.Reenlightenment, &out.Reenlightenment
*out = new(FeatureState)
**out = **in
}
if in.TLBFlush != nil {
in, out := &in.TLBFlush, &out.TLBFlush
*out = new(FeatureState)
**out = **in
}
if in.IPI != nil {
in, out := &in.IPI, &out.IPI
*out = new(FeatureState)
**out = **in
}
if in.EVMCS != nil {
in, out := &in.EVMCS, &out.EVMCS
*out = new(FeatureState)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureHyperv.
func (in *FeatureHyperv) DeepCopy() *FeatureHyperv {
if in == nil {
return nil
}
out := new(FeatureHyperv)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FeatureKVM) DeepCopyInto(out *FeatureKVM) {
*out = *in
if in.Hidden != nil {
in, out := &in.Hidden, &out.Hidden
*out = new(FeatureState)
**out = **in
}
if in.HintDedicated != nil {
in, out := &in.HintDedicated, &out.HintDedicated
*out = new(FeatureState)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureKVM.
func (in *FeatureKVM) DeepCopy() *FeatureKVM {
if in == nil {
return nil
}
out := new(FeatureKVM)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FeaturePVSpinlock) DeepCopyInto(out *FeaturePVSpinlock) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeaturePVSpinlock.
func (in *FeaturePVSpinlock) DeepCopy() *FeaturePVSpinlock {
if in == nil {
return nil
}
out := new(FeaturePVSpinlock)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FeatureSpinlocks) DeepCopyInto(out *FeatureSpinlocks) {
*out = *in
if in.Retries != nil {
in, out := &in.Retries, &out.Retries
*out = new(uint32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureSpinlocks.
func (in *FeatureSpinlocks) DeepCopy() *FeatureSpinlocks {
if in == nil {
return nil
}
out := new(FeatureSpinlocks)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FeatureState) DeepCopyInto(out *FeatureState) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureState.
func (in *FeatureState) DeepCopy() *FeatureState {
if in == nil {
return nil
}
out := new(FeatureState)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FeatureVendorID) DeepCopyInto(out *FeatureVendorID) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureVendorID.
func (in *FeatureVendorID) DeepCopy() *FeatureVendorID {
if in == nil {
return nil
}
out := new(FeatureVendorID)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Features) DeepCopyInto(out *Features) {
*out = *in
if in.ACPI != nil {
in, out := &in.ACPI, &out.ACPI
*out = new(FeatureEnabled)
**out = **in
}
if in.APIC != nil {
in, out := &in.APIC, &out.APIC
*out = new(FeatureEnabled)
**out = **in
}
if in.Hyperv != nil {
in, out := &in.Hyperv, &out.Hyperv
*out = new(FeatureHyperv)
(*in).DeepCopyInto(*out)
}
if in.SMM != nil {
in, out := &in.SMM, &out.SMM
*out = new(FeatureEnabled)
**out = **in
}
if in.KVM != nil {
in, out := &in.KVM, &out.KVM
*out = new(FeatureKVM)
(*in).DeepCopyInto(*out)
}
if in.PVSpinlock != nil {
in, out := &in.PVSpinlock, &out.PVSpinlock
*out = new(FeaturePVSpinlock)
**out = **in
}
if in.PMU != nil {
in, out := &in.PMU, &out.PMU
*out = new(FeatureState)
**out = **in
}
if in.VMPort != nil {
in, out := &in.VMPort, &out.VMPort
*out = new(FeatureState)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Features.
func (in *Features) DeepCopy() *Features {
if in == nil {
return nil
}
out := new(Features)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Filesystem) DeepCopyInto(out *Filesystem) {
*out = *in
if in.Disk != nil {
in, out := &in.Disk, &out.Disk
*out = make([]FSDisk, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filesystem.
func (in *Filesystem) DeepCopy() *Filesystem {
if in == nil {
return nil
}
out := new(Filesystem)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilesystemBinary) DeepCopyInto(out *FilesystemBinary) {
*out = *in
if in.Cache != nil {
in, out := &in.Cache, &out.Cache
*out = new(FilesystemBinaryCache)
**out = **in
}
if in.Lock != nil {
in, out := &in.Lock, &out.Lock
*out = new(FilesystemBinaryLock)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemBinary.
func (in *FilesystemBinary) DeepCopy() *FilesystemBinary {
if in == nil {
return nil
}
out := new(FilesystemBinary)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilesystemBinaryCache) DeepCopyInto(out *FilesystemBinaryCache) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemBinaryCache.
func (in *FilesystemBinaryCache) DeepCopy() *FilesystemBinaryCache {
if in == nil {
return nil
}
out := new(FilesystemBinaryCache)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilesystemBinaryLock) DeepCopyInto(out *FilesystemBinaryLock) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemBinaryLock.
func (in *FilesystemBinaryLock) DeepCopy() *FilesystemBinaryLock {
if in == nil {
return nil
}
out := new(FilesystemBinaryLock)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilesystemDevice) DeepCopyInto(out *FilesystemDevice) {
*out = *in
if in.Source != nil {
in, out := &in.Source, &out.Source
*out = new(FilesystemSource)
**out = **in
}
if in.Target != nil {
in, out := &in.Target, &out.Target
*out = new(FilesystemTarget)
**out = **in
}
if in.Driver != nil {
in, out := &in.Driver, &out.Driver
*out = new(FilesystemDriver)
**out = **in
}
if in.Binary != nil {
in, out := &in.Binary, &out.Binary
*out = new(FilesystemBinary)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemDevice.
func (in *FilesystemDevice) DeepCopy() *FilesystemDevice {
if in == nil {
return nil
}
out := new(FilesystemDevice)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilesystemDriver) DeepCopyInto(out *FilesystemDriver) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemDriver.
func (in *FilesystemDriver) DeepCopy() *FilesystemDriver {
if in == nil {
return nil
}
out := new(FilesystemDriver)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilesystemSource) DeepCopyInto(out *FilesystemSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemSource.
func (in *FilesystemSource) DeepCopy() *FilesystemSource {
if in == nil {
return nil
}
out := new(FilesystemSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilesystemTarget) DeepCopyInto(out *FilesystemTarget) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemTarget.
func (in *FilesystemTarget) DeepCopy() *FilesystemTarget {
if in == nil {
return nil
}
out := new(FilesystemTarget)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FilterRef) DeepCopyInto(out *FilterRef) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilterRef.
func (in *FilterRef) DeepCopy() *FilterRef {
if in == nil {
return nil
}
out := new(FilterRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GracePeriodMetadata) DeepCopyInto(out *GracePeriodMetadata) {
*out = *in
if in.DeletionTimestamp != nil {
in, out := &in.DeletionTimestamp, &out.DeletionTimestamp
*out = (*in).DeepCopy()
}
if in.MarkedForGracefulShutdown != nil {
in, out := &in.MarkedForGracefulShutdown, &out.MarkedForGracefulShutdown
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GracePeriodMetadata.
func (in *GracePeriodMetadata) DeepCopy() *GracePeriodMetadata {
if in == nil {
return nil
}
out := new(GracePeriodMetadata)
in.DeepCopyInto(out)
return out
}
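// Note the DeletionTimestamp handling above: instead of copying field by
// field, the generator emits `*out = (*in).DeepCopy()` because the field's
// type (a *metav1.Time in the upstream source) defines its own DeepCopy
// method. That is the general rule: any field type with a hand-written or
// generated DeepCopy is delegated to, keeping copy semantics with the owning
// type. Illustrative usage (src is a placeholder *GracePeriodMetadata):
//
//	copied := src.DeepCopy()
//	copied.DeletionTimestamp = nil // distinct allocation; src.DeletionTimestamp is unchanged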
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Graphics) DeepCopyInto(out *Graphics) {
*out = *in
if in.Listen != nil {
in, out := &in.Listen, &out.Listen
*out = new(GraphicsListen)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Graphics.
func (in *Graphics) DeepCopy() *Graphics {
if in == nil {
return nil
}
out := new(Graphics)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GraphicsListen) DeepCopyInto(out *GraphicsListen) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphicsListen.
func (in *GraphicsListen) DeepCopy() *GraphicsListen {
if in == nil {
return nil
}
out := new(GraphicsListen)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GuestOSInfo) DeepCopyInto(out *GuestOSInfo) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GuestOSInfo.
func (in *GuestOSInfo) DeepCopy() *GuestOSInfo {
if in == nil {
return nil
}
out := new(GuestOSInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostDevice) DeepCopyInto(out *HostDevice) {
*out = *in
out.XMLName = in.XMLName
in.Source.DeepCopyInto(&out.Source)
if in.BootOrder != nil {
in, out := &in.BootOrder, &out.BootOrder
*out = new(BootOrder)
**out = **in
}
if in.Address != nil {
in, out := &in.Address, &out.Address
*out = new(Address)
**out = **in
}
if in.Alias != nil {
in, out := &in.Alias, &out.Alias
*out = new(Alias)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostDevice.
func (in *HostDevice) DeepCopy() *HostDevice {
if in == nil {
return nil
}
out := new(HostDevice)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostDeviceSource) DeepCopyInto(out *HostDeviceSource) {
*out = *in
if in.Address != nil {
in, out := &in.Address, &out.Address
*out = new(Address)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostDeviceSource.
func (in *HostDeviceSource) DeepCopy() *HostDeviceSource {
if in == nil {
return nil
}
out := new(HostDeviceSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HugePage) DeepCopyInto(out *HugePage) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HugePage.
func (in *HugePage) DeepCopy() *HugePage {
if in == nil {
return nil
}
out := new(HugePage)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HugePages) DeepCopyInto(out *HugePages) {
*out = *in
if in.HugePage != nil {
in, out := &in.HugePage, &out.HugePage
*out = make([]HugePage, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HugePages.
func (in *HugePages) DeepCopy() *HugePages {
if in == nil {
return nil
}
out := new(HugePages)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IOThreads) DeepCopyInto(out *IOThreads) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IOThreads.
func (in *IOThreads) DeepCopy() *IOThreads {
if in == nil {
return nil
}
out := new(IOThreads)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Input) DeepCopyInto(out *Input) {
*out = *in
if in.Alias != nil {
in, out := &in.Alias, &out.Alias
*out = new(Alias)
**out = **in
}
if in.Address != nil {
in, out := &in.Address, &out.Address
*out = new(Address)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Input.
func (in *Input) DeepCopy() *Input {
if in == nil {
return nil
}
out := new(Input)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Interface) DeepCopyInto(out *Interface) {
*out = *in
out.XMLName = in.XMLName
if in.Address != nil {
in, out := &in.Address, &out.Address
*out = new(Address)
**out = **in
}
in.Source.DeepCopyInto(&out.Source)
if in.Target != nil {
in, out := &in.Target, &out.Target
*out = new(InterfaceTarget)
**out = **in
}
if in.Model != nil {
in, out := &in.Model, &out.Model
*out = new(Model)
**out = **in
}
if in.MAC != nil {
in, out := &in.MAC, &out.MAC
*out = new(MAC)
**out = **in
}
if in.MTU != nil {
in, out := &in.MTU, &out.MTU
*out = new(MTU)
**out = **in
}
if in.BandWidth != nil {
in, out := &in.BandWidth, &out.BandWidth
*out = new(BandWidth)
**out = **in
}
if in.BootOrder != nil {
in, out := &in.BootOrder, &out.BootOrder
*out = new(BootOrder)
**out = **in
}
if in.LinkState != nil {
in, out := &in.LinkState, &out.LinkState
*out = new(LinkState)
**out = **in
}
if in.FilterRef != nil {
in, out := &in.FilterRef, &out.FilterRef
*out = new(FilterRef)
**out = **in
}
if in.Alias != nil {
in, out := &in.Alias, &out.Alias
*out = new(Alias)
**out = **in
}
if in.Driver != nil {
in, out := &in.Driver, &out.Driver
*out = new(InterfaceDriver)
(*in).DeepCopyInto(*out)
}
if in.Rom != nil {
in, out := &in.Rom, &out.Rom
*out = new(Rom)
**out = **in
}
if in.ACPI != nil {
in, out := &in.ACPI, &out.ACPI
*out = new(ACPI)
**out = **in
}
if in.Backend != nil {
in, out := &in.Backend, &out.Backend
*out = new(InterfaceBackend)
**out = **in
}
if in.PortForward != nil {
in, out := &in.PortForward, &out.PortForward
*out = make([]InterfacePortForward, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Interface.
func (in *Interface) DeepCopy() *Interface {
if in == nil {
return nil
}
out := new(Interface)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InterfaceBackend) DeepCopyInto(out *InterfaceBackend) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterfaceBackend.
func (in *InterfaceBackend) DeepCopy() *InterfaceBackend {
if in == nil {
return nil
}
out := new(InterfaceBackend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InterfaceDriver) DeepCopyInto(out *InterfaceDriver) {
*out = *in
if in.Queues != nil {
in, out := &in.Queues, &out.Queues
*out = new(uint)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterfaceDriver.
func (in *InterfaceDriver) DeepCopy() *InterfaceDriver {
if in == nil {
return nil
}
out := new(InterfaceDriver)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InterfacePortForward) DeepCopyInto(out *InterfacePortForward) {
*out = *in
if in.Ranges != nil {
in, out := &in.Ranges, &out.Ranges
*out = make([]InterfacePortForwardRange, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterfacePortForward.
func (in *InterfacePortForward) DeepCopy() *InterfacePortForward {
if in == nil {
return nil
}
out := new(InterfacePortForward)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InterfacePortForwardRange) DeepCopyInto(out *InterfacePortForwardRange) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterfacePortForwardRange.
func (in *InterfacePortForwardRange) DeepCopy() *InterfacePortForwardRange {
if in == nil {
return nil
}
out := new(InterfacePortForwardRange)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InterfaceSource) DeepCopyInto(out *InterfaceSource) {
*out = *in
if in.Address != nil {
in, out := &in.Address, &out.Address
*out = new(Address)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterfaceSource.
func (in *InterfaceSource) DeepCopy() *InterfaceSource {
if in == nil {
return nil
}
out := new(InterfaceSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InterfaceStatus) DeepCopyInto(out *InterfaceStatus) {
*out = *in
if in.IPs != nil {
in, out := &in.IPs, &out.IPs
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterfaceStatus.
func (in *InterfaceStatus) DeepCopy() *InterfaceStatus {
if in == nil {
return nil
}
out := new(InterfaceStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InterfaceTarget) DeepCopyInto(out *InterfaceTarget) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterfaceTarget.
func (in *InterfaceTarget) DeepCopy() *InterfaceTarget {
if in == nil {
return nil
}
out := new(InterfaceTarget)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeVirtMetadata) DeepCopyInto(out *KubeVirtMetadata) {
*out = *in
if in.GracePeriod != nil {
in, out := &in.GracePeriod, &out.GracePeriod
*out = new(GracePeriodMetadata)
(*in).DeepCopyInto(*out)
}
if in.Migration != nil {
in, out := &in.Migration, &out.Migration
*out = new(MigrationMetadata)
(*in).DeepCopyInto(*out)
}
if in.Backup != nil {
in, out := &in.Backup, &out.Backup
*out = new(BackupMetadata)
(*in).DeepCopyInto(*out)
}
if in.AccessCredential != nil {
in, out := &in.AccessCredential, &out.AccessCredential
*out = new(AccessCredentialMetadata)
**out = **in
}
if in.MemoryDump != nil {
in, out := &in.MemoryDump, &out.MemoryDump
*out = new(MemoryDumpMetadata)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeVirtMetadata.
func (in *KubeVirtMetadata) DeepCopy() *KubeVirtMetadata {
if in == nil {
return nil
}
out := new(KubeVirtMetadata)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LaunchSecurity) DeepCopyInto(out *LaunchSecurity) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchSecurity.
func (in *LaunchSecurity) DeepCopy() *LaunchSecurity {
if in == nil {
return nil
}
out := new(LaunchSecurity)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinkState) DeepCopyInto(out *LinkState) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinkState.
func (in *LinkState) DeepCopy() *LinkState {
if in == nil {
return nil
}
out := new(LinkState)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Loader) DeepCopyInto(out *Loader) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Loader.
func (in *Loader) DeepCopy() *Loader {
if in == nil {
return nil
}
out := new(Loader)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MAC) DeepCopyInto(out *MAC) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MAC.
func (in *MAC) DeepCopy() *MAC {
if in == nil {
return nil
}
out := new(MAC)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MTU) DeepCopyInto(out *MTU) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTU.
func (in *MTU) DeepCopy() *MTU {
if in == nil {
return nil
}
out := new(MTU)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MaxMemory) DeepCopyInto(out *MaxMemory) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaxMemory.
func (in *MaxMemory) DeepCopy() *MaxMemory {
if in == nil {
return nil
}
out := new(MaxMemory)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MemBalloon) DeepCopyInto(out *MemBalloon) {
*out = *in
if in.Stats != nil {
in, out := &in.Stats, &out.Stats
*out = new(Stats)
**out = **in
}
if in.Address != nil {
in, out := &in.Address, &out.Address
*out = new(Address)
**out = **in
}
if in.Driver != nil {
in, out := &in.Driver, &out.Driver
*out = new(MemBalloonDriver)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemBalloon.
func (in *MemBalloon) DeepCopy() *MemBalloon {
if in == nil {
return nil
}
out := new(MemBalloon)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MemBalloonDriver) DeepCopyInto(out *MemBalloonDriver) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemBalloonDriver.
func (in *MemBalloonDriver) DeepCopy() *MemBalloonDriver {
if in == nil {
return nil
}
out := new(MemBalloonDriver)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MemNode) DeepCopyInto(out *MemNode) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemNode.
func (in *MemNode) DeepCopy() *MemNode {
if in == nil {
return nil
}
out := new(MemNode)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Memory) DeepCopyInto(out *Memory) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Memory.
func (in *Memory) DeepCopy() *Memory {
if in == nil {
return nil
}
out := new(Memory)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MemoryAddress) DeepCopyInto(out *MemoryAddress) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryAddress.
func (in *MemoryAddress) DeepCopy() *MemoryAddress {
if in == nil {
return nil
}
out := new(MemoryAddress)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MemoryAllocation) DeepCopyInto(out *MemoryAllocation) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryAllocation.
func (in *MemoryAllocation) DeepCopy() *MemoryAllocation {
if in == nil {
return nil
}
out := new(MemoryAllocation)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MemoryBacking) DeepCopyInto(out *MemoryBacking) {
*out = *in
if in.HugePages != nil {
in, out := &in.HugePages, &out.HugePages
*out = new(HugePages)
(*in).DeepCopyInto(*out)
}
if in.Source != nil {
in, out := &in.Source, &out.Source
*out = new(MemoryBackingSource)
**out = **in
}
if in.Access != nil {
in, out := &in.Access, &out.Access
*out = new(MemoryBackingAccess)
**out = **in
}
if in.Allocation != nil {
in, out := &in.Allocation, &out.Allocation
*out = new(MemoryAllocation)
**out = **in
}
if in.NoSharePages != nil {
in, out := &in.NoSharePages, &out.NoSharePages
*out = new(NoSharePages)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryBacking.
func (in *MemoryBacking) DeepCopy() *MemoryBacking {
if in == nil {
return nil
}
out := new(MemoryBacking)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MemoryBackingAccess) DeepCopyInto(out *MemoryBackingAccess) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryBackingAccess.
func (in *MemoryBackingAccess) DeepCopy() *MemoryBackingAccess {
if in == nil {
return nil
}
out := new(MemoryBackingAccess)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MemoryBackingSource) DeepCopyInto(out *MemoryBackingSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryBackingSource.
func (in *MemoryBackingSource) DeepCopy() *MemoryBackingSource {
if in == nil {
return nil
}
out := new(MemoryBackingSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MemoryDevice) DeepCopyInto(out *MemoryDevice) {
*out = *in
out.XMLName = in.XMLName
if in.Target != nil {
in, out := &in.Target, &out.Target
*out = new(MemoryTarget)
(*in).DeepCopyInto(*out)
}
if in.Alias != nil {
in, out := &in.Alias, &out.Alias
*out = new(Alias)
**out = **in
}
if in.Address != nil {
in, out := &in.Address, &out.Address
*out = new(Address)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryDevice.
func (in *MemoryDevice) DeepCopy() *MemoryDevice {
if in == nil {
return nil
}
out := new(MemoryDevice)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MemoryDumpMetadata) DeepCopyInto(out *MemoryDumpMetadata) {
*out = *in
if in.StartTimestamp != nil {
in, out := &in.StartTimestamp, &out.StartTimestamp
*out = (*in).DeepCopy()
}
if in.EndTimestamp != nil {
in, out := &in.EndTimestamp, &out.EndTimestamp
*out = (*in).DeepCopy()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryDumpMetadata.
func (in *MemoryDumpMetadata) DeepCopy() *MemoryDumpMetadata {
if in == nil {
return nil
}
out := new(MemoryDumpMetadata)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MemoryTarget) DeepCopyInto(out *MemoryTarget) {
*out = *in
out.Size = in.Size
out.Requested = in.Requested
out.Current = in.Current
out.Block = in.Block
if in.Address != nil {
in, out := &in.Address, &out.Address
*out = new(MemoryAddress)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryTarget.
func (in *MemoryTarget) DeepCopy() *MemoryTarget {
if in == nil {
return nil
}
out := new(MemoryTarget)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Metadata) DeepCopyInto(out *Metadata) {
*out = *in
in.KubeVirt.DeepCopyInto(&out.KubeVirt)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata.
func (in *Metadata) DeepCopy() *Metadata {
if in == nil {
return nil
}
out := new(Metadata)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MigrationMetadata) DeepCopyInto(out *MigrationMetadata) {
*out = *in
if in.StartTimestamp != nil {
in, out := &in.StartTimestamp, &out.StartTimestamp
*out = (*in).DeepCopy()
}
if in.EndTimestamp != nil {
in, out := &in.EndTimestamp, &out.EndTimestamp
*out = (*in).DeepCopy()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrationMetadata.
func (in *MigrationMetadata) DeepCopy() *MigrationMetadata {
if in == nil {
return nil
}
out := new(MigrationMetadata)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Model) DeepCopyInto(out *Model) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Model.
func (in *Model) DeepCopy() *Model {
if in == nil {
return nil
}
out := new(Model)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NUMA) DeepCopyInto(out *NUMA) {
*out = *in
if in.Cells != nil {
in, out := &in.Cells, &out.Cells
*out = make([]NUMACell, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NUMA.
func (in *NUMA) DeepCopy() *NUMA {
if in == nil {
return nil
}
out := new(NUMA)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NUMACell) DeepCopyInto(out *NUMACell) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NUMACell.
func (in *NUMACell) DeepCopy() *NUMACell {
if in == nil {
return nil
}
out := new(NUMACell)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NUMATune) DeepCopyInto(out *NUMATune) {
*out = *in
out.Memory = in.Memory
if in.MemNodes != nil {
in, out := &in.MemNodes, &out.MemNodes
*out = make([]MemNode, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NUMATune.
func (in *NUMATune) DeepCopy() *NUMATune {
if in == nil {
return nil
}
out := new(NUMATune)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NVRam) DeepCopyInto(out *NVRam) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NVRam.
func (in *NVRam) DeepCopy() *NVRam {
if in == nil {
return nil
}
out := new(NVRam)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NoSharePages) DeepCopyInto(out *NoSharePages) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoSharePages.
func (in *NoSharePages) DeepCopy() *NoSharePages {
if in == nil {
return nil
}
out := new(NoSharePages)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NumaTuneMemory) DeepCopyInto(out *NumaTuneMemory) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NumaTuneMemory.
func (in *NumaTuneMemory) DeepCopy() *NumaTuneMemory {
if in == nil {
return nil
}
out := new(NumaTuneMemory)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OS) DeepCopyInto(out *OS) {
*out = *in
out.Type = in.Type
if in.ACPI != nil {
in, out := &in.ACPI, &out.ACPI
*out = new(OSACPI)
(*in).DeepCopyInto(*out)
}
if in.SMBios != nil {
in, out := &in.SMBios, &out.SMBios
*out = new(SMBios)
**out = **in
}
if in.BootOrder != nil {
in, out := &in.BootOrder, &out.BootOrder
*out = make([]Boot, len(*in))
copy(*out, *in)
}
if in.BootMenu != nil {
in, out := &in.BootMenu, &out.BootMenu
*out = new(BootMenu)
(*in).DeepCopyInto(*out)
}
if in.BIOS != nil {
in, out := &in.BIOS, &out.BIOS
*out = new(BIOS)
**out = **in
}
if in.BootLoader != nil {
in, out := &in.BootLoader, &out.BootLoader
*out = new(Loader)
**out = **in
}
if in.NVRam != nil {
in, out := &in.NVRam, &out.NVRam
*out = new(NVRam)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OS.
func (in *OS) DeepCopy() *OS {
if in == nil {
return nil
}
out := new(OS)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSACPI) DeepCopyInto(out *OSACPI) {
*out = *in
if in.Table != nil {
in, out := &in.Table, &out.Table
*out = make([]ACPITable, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSACPI.
func (in *OSACPI) DeepCopy() *OSACPI {
if in == nil {
return nil
}
out := new(OSACPI)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSType) DeepCopyInto(out *OSType) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSType.
func (in *OSType) DeepCopy() *OSType {
if in == nil {
return nil
}
out := new(OSType)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PCIHole64) DeepCopyInto(out *PCIHole64) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PCIHole64.
func (in *PCIHole64) DeepCopy() *PCIHole64 {
if in == nil {
return nil
}
out := new(PCIHole64)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PanicDevice) DeepCopyInto(out *PanicDevice) {
*out = *in
if in.Model != nil {
in, out := &in.Model, &out.Model
*out = new(v1.PanicDeviceModel)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PanicDevice.
func (in *PanicDevice) DeepCopy() *PanicDevice {
if in == nil {
return nil
}
out := new(PanicDevice)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReadOnly) DeepCopyInto(out *ReadOnly) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadOnly.
func (in *ReadOnly) DeepCopy() *ReadOnly {
if in == nil {
return nil
}
out := new(ReadOnly)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedirectedDevice) DeepCopyInto(out *RedirectedDevice) {
*out = *in
out.Source = in.Source
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectedDevice.
func (in *RedirectedDevice) DeepCopy() *RedirectedDevice {
if in == nil {
return nil
}
out := new(RedirectedDevice)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedirectedDeviceSource) DeepCopyInto(out *RedirectedDeviceSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectedDeviceSource.
func (in *RedirectedDeviceSource) DeepCopy() *RedirectedDeviceSource {
if in == nil {
return nil
}
out := new(RedirectedDeviceSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Reservations) DeepCopyInto(out *Reservations) {
*out = *in
if in.SourceReservations != nil {
in, out := &in.SourceReservations, &out.SourceReservations
*out = new(SourceReservations)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Reservations.
func (in *Reservations) DeepCopy() *Reservations {
if in == nil {
return nil
}
out := new(Reservations)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Resource) DeepCopyInto(out *Resource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Resource.
func (in *Resource) DeepCopy() *Resource {
if in == nil {
return nil
}
out := new(Resource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Rng) DeepCopyInto(out *Rng) {
*out = *in
if in.Backend != nil {
in, out := &in.Backend, &out.Backend
*out = new(RngBackend)
**out = **in
}
if in.Address != nil {
in, out := &in.Address, &out.Address
*out = new(Address)
**out = **in
}
if in.Driver != nil {
in, out := &in.Driver, &out.Driver
*out = new(RngDriver)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rng.
func (in *Rng) DeepCopy() *Rng {
if in == nil {
return nil
}
out := new(Rng)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RngBackend) DeepCopyInto(out *RngBackend) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RngBackend.
func (in *RngBackend) DeepCopy() *RngBackend {
if in == nil {
return nil
}
out := new(RngBackend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RngDriver) DeepCopyInto(out *RngDriver) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RngDriver.
func (in *RngDriver) DeepCopy() *RngDriver {
if in == nil {
return nil
}
out := new(RngDriver)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RngRate) DeepCopyInto(out *RngRate) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RngRate.
func (in *RngRate) DeepCopy() *RngRate {
if in == nil {
return nil
}
out := new(RngRate)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Rom) DeepCopyInto(out *Rom) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rom.
func (in *Rom) DeepCopy() *Rom {
if in == nil {
return nil
}
out := new(Rom)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SEVNodeParameters) DeepCopyInto(out *SEVNodeParameters) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SEVNodeParameters.
func (in *SEVNodeParameters) DeepCopy() *SEVNodeParameters {
if in == nil {
return nil
}
out := new(SEVNodeParameters)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SMBios) DeepCopyInto(out *SMBios) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SMBios.
func (in *SMBios) DeepCopy() *SMBios {
if in == nil {
return nil
}
out := new(SMBios)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretSpec) DeepCopyInto(out *SecretSpec) {
*out = *in
out.XMLName = in.XMLName
out.Usage = in.Usage
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretSpec.
func (in *SecretSpec) DeepCopy() *SecretSpec {
if in == nil {
return nil
}
out := new(SecretSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretUsage) DeepCopyInto(out *SecretUsage) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretUsage.
func (in *SecretUsage) DeepCopy() *SecretUsage {
if in == nil {
return nil
}
out := new(SecretUsage)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Serial) DeepCopyInto(out *Serial) {
*out = *in
if in.Target != nil {
in, out := &in.Target, &out.Target
*out = new(SerialTarget)
(*in).DeepCopyInto(*out)
}
if in.Source != nil {
in, out := &in.Source, &out.Source
*out = new(SerialSource)
**out = **in
}
if in.Alias != nil {
in, out := &in.Alias, &out.Alias
*out = new(Alias)
**out = **in
}
if in.Log != nil {
in, out := &in.Log, &out.Log
*out = new(SerialLog)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Serial.
func (in *Serial) DeepCopy() *Serial {
if in == nil {
return nil
}
out := new(Serial)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SerialLog) DeepCopyInto(out *SerialLog) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerialLog.
func (in *SerialLog) DeepCopy() *SerialLog {
if in == nil {
return nil
}
out := new(SerialLog)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SerialSource) DeepCopyInto(out *SerialSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerialSource.
func (in *SerialSource) DeepCopy() *SerialSource {
if in == nil {
return nil
}
out := new(SerialSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SerialTarget) DeepCopyInto(out *SerialTarget) {
*out = *in
if in.Port != nil {
in, out := &in.Port, &out.Port
*out = new(uint)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerialTarget.
func (in *SerialTarget) DeepCopy() *SerialTarget {
if in == nil {
return nil
}
out := new(SerialTarget)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Shareable) DeepCopyInto(out *Shareable) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Shareable.
func (in *Shareable) DeepCopy() *Shareable {
if in == nil {
return nil
}
out := new(Shareable)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Slice) DeepCopyInto(out *Slice) {
*out = *in
out.Slice = in.Slice
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Slice.
func (in *Slice) DeepCopy() *Slice {
if in == nil {
return nil
}
out := new(Slice)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SliceType) DeepCopyInto(out *SliceType) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SliceType.
func (in *SliceType) DeepCopy() *SliceType {
if in == nil {
return nil
}
out := new(SliceType)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SoundCard) DeepCopyInto(out *SoundCard) {
*out = *in
if in.Alias != nil {
in, out := &in.Alias, &out.Alias
*out = new(Alias)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoundCard.
func (in *SoundCard) DeepCopy() *SoundCard {
if in == nil {
return nil
}
out := new(SoundCard)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SourceReservations) DeepCopyInto(out *SourceReservations) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceReservations.
func (in *SourceReservations) DeepCopy() *SourceReservations {
if in == nil {
return nil
}
out := new(SourceReservations)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Stats) DeepCopyInto(out *Stats) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Stats.
func (in *Stats) DeepCopy() *Stats {
if in == nil {
return nil
}
out := new(Stats)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SyNICTimer) DeepCopyInto(out *SyNICTimer) {
*out = *in
if in.Direct != nil {
in, out := &in.Direct, &out.Direct
*out = new(FeatureState)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyNICTimer.
func (in *SyNICTimer) DeepCopy() *SyNICTimer {
if in == nil {
return nil
}
out := new(SyNICTimer)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SysInfo) DeepCopyInto(out *SysInfo) {
*out = *in
if in.System != nil {
in, out := &in.System, &out.System
*out = make([]Entry, len(*in))
copy(*out, *in)
}
if in.BIOS != nil {
in, out := &in.BIOS, &out.BIOS
*out = make([]Entry, len(*in))
copy(*out, *in)
}
if in.BaseBoard != nil {
in, out := &in.BaseBoard, &out.BaseBoard
*out = make([]Entry, len(*in))
copy(*out, *in)
}
if in.Chassis != nil {
in, out := &in.Chassis, &out.Chassis
*out = make([]Entry, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SysInfo.
func (in *SysInfo) DeepCopy() *SysInfo {
if in == nil {
return nil
}
out := new(SysInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TPM) DeepCopyInto(out *TPM) {
*out = *in
out.Backend = in.Backend
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TPM.
func (in *TPM) DeepCopy() *TPM {
if in == nil {
return nil
}
out := new(TPM)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TPMBackend) DeepCopyInto(out *TPMBackend) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TPMBackend.
func (in *TPMBackend) DeepCopy() *TPMBackend {
if in == nil {
return nil
}
out := new(TPMBackend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Timer) DeepCopyInto(out *Timer) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Timer.
func (in *Timer) DeepCopy() *Timer {
if in == nil {
return nil
}
out := new(Timer)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Timezone) DeepCopyInto(out *Timezone) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Timezone.
func (in *Timezone) DeepCopy() *Timezone {
if in == nil {
return nil
}
out := new(Timezone)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *User) DeepCopyInto(out *User) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User.
func (in *User) DeepCopy() *User {
if in == nil {
return nil
}
out := new(User)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VCPU) DeepCopyInto(out *VCPU) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VCPU.
func (in *VCPU) DeepCopy() *VCPU {
if in == nil {
return nil
}
out := new(VCPU)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VCPUs) DeepCopyInto(out *VCPUs) {
*out = *in
if in.VCPU != nil {
in, out := &in.VCPU, &out.VCPU
*out = make([]VCPUsVCPU, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VCPUs.
func (in *VCPUs) DeepCopy() *VCPUs {
if in == nil {
return nil
}
out := new(VCPUs)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VCPUsVCPU) DeepCopyInto(out *VCPUsVCPU) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VCPUsVCPU.
func (in *VCPUsVCPU) DeepCopy() *VCPUsVCPU {
if in == nil {
return nil
}
out := new(VCPUsVCPU)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VSOCK) DeepCopyInto(out *VSOCK) {
*out = *in
out.CID = in.CID
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSOCK.
func (in *VSOCK) DeepCopy() *VSOCK {
if in == nil {
return nil
}
out := new(VSOCK)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Video) DeepCopyInto(out *Video) {
*out = *in
in.Model.DeepCopyInto(&out.Model)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Video.
func (in *Video) DeepCopy() *Video {
if in == nil {
return nil
}
out := new(Video)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VideoModel) DeepCopyInto(out *VideoModel) {
*out = *in
if in.Heads != nil {
in, out := &in.Heads, &out.Heads
*out = new(uint)
**out = **in
}
if in.Ram != nil {
in, out := &in.Ram, &out.Ram
*out = new(uint)
**out = **in
}
if in.VRam != nil {
in, out := &in.VRam, &out.VRam
*out = new(uint)
**out = **in
}
if in.VGAMem != nil {
in, out := &in.VGAMem, &out.VGAMem
*out = new(uint)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VideoModel.
func (in *VideoModel) DeepCopy() *VideoModel {
if in == nil {
return nil
}
out := new(VideoModel)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Watchdog) DeepCopyInto(out *Watchdog) {
*out = *in
if in.Alias != nil {
in, out := &in.Alias, &out.Alias
*out = new(Alias)
**out = **in
}
if in.Address != nil {
in, out := &in.Address, &out.Address
*out = new(Address)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Watchdog.
func (in *Watchdog) DeepCopy() *Watchdog {
if in == nil {
return nil
}
out := new(Watchdog)
in.DeepCopyInto(out)
return out
}
package api
import archdefaulter "kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api/arch-defaulter"
const (
DefaultProtocol = "TCP"
DefaultVMCIDR = "10.0.2.0/24"
DefaultVMIpv6CIDR = "fd10:0:2::/120"
DefaultBridgeName = "k6t-eth0"
)
func NewDefaulter(arch string) *Defaulter {
return &Defaulter{
ArchDefaulter: archdefaulter.NewArchDefaulter(arch),
}
}
type Defaulter struct {
ArchDefaulter archdefaulter.ArchDefaulter
}
func (d *Defaulter) setDefaults_OSType(ostype *OSType) {
ostype.OS = "hvm"
if ostype.Arch == "" {
ostype.Arch = d.ArchDefaulter.OSTypeArch()
}
// TODO: we probably want to select concrete type in the future for "future-backwards" compatibility.
if ostype.Machine == "" {
ostype.Machine = d.ArchDefaulter.OSTypeMachine()
}
}
func (d *Defaulter) setDefaults_DomainSpec(spec *DomainSpec) {
spec.XmlNS = "http://libvirt.org/schemas/domain/qemu/1.0"
if spec.Type == "" {
spec.Type = "kvm"
}
}
func (d *Defaulter) SetObjectDefaults_Domain(in *Domain) {
d.setDefaults_DomainSpec(&in.Spec)
d.setDefaults_OSType(&in.Spec.OS.Type)
}
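// Usage sketch (illustrative only; "amd64" is an assumed arch string, which
// callers would normally take from the node):
//
//	d := NewDefaulter("amd64")
//	dom := &Domain{}
//	d.SetObjectDefaults_Domain(dom)
//	// dom.Spec.Type is now "kvm", dom.Spec.OS.Type.OS is "hvm", and the
//	// machine type/arch come from the arch defaulter.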
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2017,The KubeVirt Authors.
*
*/
package api
import (
"encoding/json"
"encoding/xml"
"fmt"
"strings"
kubev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/precond"
)
// For versioning of the virt-handler and virt-launcher communication,
// you need to increase the DomainVersion const when making changes,
// and make the necessary changes in the cmd and notify rpc implementations!
const (
DomainVersion = "v1"
)
type LifeCycle string
type StateChangeReason string
const (
NoState LifeCycle = "NoState"
Running LifeCycle = "Running"
Blocked LifeCycle = "Blocked"
Paused LifeCycle = "Paused"
Shutdown LifeCycle = "ShuttingDown"
Shutoff LifeCycle = "Shutoff"
Crashed LifeCycle = "Crashed"
PMSuspended LifeCycle = "PMSuspended"
// Common reasons
ReasonUnknown StateChangeReason = "Unknown"
// ShuttingDown reasons
ReasonUser StateChangeReason = "User"
// Shutoff reasons
ReasonShutdown StateChangeReason = "Shutdown"
ReasonDestroyed StateChangeReason = "Destroyed"
ReasonMigrated StateChangeReason = "Migrated"
ReasonCrashed StateChangeReason = "Crashed"
ReasonPanicked StateChangeReason = "Panicked"
ReasonSaved StateChangeReason = "Saved"
ReasonFailed StateChangeReason = "Failed"
ReasonFromSnapshot StateChangeReason = "FromSnapshot"
// NoState reasons
ReasonNonExistent StateChangeReason = "NonExistent"
// Pause reasons
ReasonPausedUnknown StateChangeReason = "Unknown"
ReasonPausedUser StateChangeReason = "User"
ReasonPausedMigration StateChangeReason = "Migration"
ReasonPausedSave StateChangeReason = "Save"
ReasonPausedDump StateChangeReason = "Dump"
ReasonPausedIOError StateChangeReason = "IOError"
ReasonPausedWatchdog StateChangeReason = "Watchdog"
ReasonPausedFromSnapshot StateChangeReason = "FromSnapshot"
ReasonPausedShuttingDown StateChangeReason = "ShuttingDown"
ReasonPausedSnapshot StateChangeReason = "Snapshot"
ReasonPausedCrashed StateChangeReason = "Crashed"
ReasonPausedStartingUp StateChangeReason = "StartingUp"
ReasonPausedPostcopy StateChangeReason = "Postcopy"
ReasonPausedPostcopyFailed StateChangeReason = "PostcopyFailed"
UserAliasPrefix = "ua-"
FSThawed = "thawed"
FSFrozen = "frozen"
SchedulerFIFO = "fifo"
HostDevicePCI = "pci"
HostDeviceMDev = "mdev"
HostDeviceUSB = "usb"
AddressPCI = "pci"
AddressCCW = "ccw"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Domain struct {
metav1.TypeMeta
metav1.ObjectMeta `json:"ObjectMeta"`
Spec DomainSpec
Status DomainStatus
}
type DomainStatus struct {
Status LifeCycle
Reason StateChangeReason
Interfaces []InterfaceStatus
OSInfo GuestOSInfo
FSFreezeStatus FSFreeze
}
type DomainSysInfo struct {
Hostname string
OSInfo GuestOSInfo
Timezone Timezone
}
type GuestOSInfo struct {
Name string
KernelRelease string
Version string
PrettyName string
VersionId string
KernelVersion string
Machine string
Id string
}
type InterfaceStatus struct {
Mac string
Ip string
IPs []string
InterfaceName string
}
type SEVNodeParameters struct {
PDH string
CertChain string
}
type Timezone struct {
Zone string
Offset int
}
type FSFreeze struct {
Status string
}
type FSDisk struct {
Serial string
BusType string
}
type Filesystem struct {
Name string
Mountpoint string
Type string
UsedBytes int
TotalBytes int
Disk []FSDisk
}
type User struct {
Name string
Domain string
LoginTime float64
}
// DomainGuestInfo represents guest agent info for a specific domain
type DomainGuestInfo struct {
Interfaces []InterfaceStatus
OSInfo *GuestOSInfo
FSFreezeStatus *FSFreeze
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type DomainList struct {
metav1.TypeMeta
ListMeta metav1.ListMeta
Items []Domain
}
// DomainSpec represents the actual conversion to libvirt XML. The fields must be
// tagged, and they must correspond to the libvirt domain as described in
// https://libvirt.org/formatdomain.html.
type DomainSpec struct {
XMLName xml.Name `xml:"domain"`
Type string `xml:"type,attr"`
XmlNS string `xml:"xmlns:qemu,attr,omitempty"`
Name string `xml:"name"`
UUID string `xml:"uuid,omitempty"`
Memory Memory `xml:"memory"`
CurrentMemory *Memory `xml:"currentMemory,omitempty"`
MaxMemory *MaxMemory `xml:"maxMemory,omitempty"`
MemoryBacking *MemoryBacking `xml:"memoryBacking,omitempty"`
OS OS `xml:"os"`
SysInfo *SysInfo `xml:"sysinfo,omitempty"`
Devices Devices `xml:"devices"`
Clock *Clock `xml:"clock,omitempty"`
Resource *Resource `xml:"resource,omitempty"`
QEMUCmd *Commandline `xml:"qemu:commandline,omitempty"`
Metadata Metadata `xml:"metadata,omitempty"`
Features *Features `xml:"features,omitempty"`
CPU CPU `xml:"cpu"`
VCPU *VCPU `xml:"vcpu"`
VCPUs *VCPUs `xml:"vcpus"`
CPUTune *CPUTune `xml:"cputune"`
NUMATune *NUMATune `xml:"numatune"`
IOThreads *IOThreads `xml:"iothreads,omitempty"`
LaunchSecurity *LaunchSecurity `xml:"launchSecurity,omitempty"`
}
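// Marshal sketch (a minimal example, assuming standard encoding/xml behavior
// and the NewMinimalDomainSpec helper defined later in this package; defaults
// such as type="kvm" are applied separately by the Defaulter):
//
//	spec := NewMinimalDomainSpec("default_testvmi")
//	out, err := xml.MarshalIndent(spec, "", "  ")
//	if err != nil {
//		// handle the error
//	}
//	// out now holds a <domain> element containing <name>default_testvmi</name>
//	// and <memory unit="MB">9</memory>.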
type CPUTune struct {
VCPUPin []CPUTuneVCPUPin `xml:"vcpupin"`
IOThreadPin []CPUTuneIOThreadPin `xml:"iothreadpin,omitempty"`
EmulatorPin *CPUEmulatorPin `xml:"emulatorpin"`
}
type NUMATune struct {
Memory NumaTuneMemory `xml:"memory"`
MemNodes []MemNode `xml:"memnode"`
}
type MemNode struct {
CellID uint32 `xml:"cellid,attr"`
Mode string `xml:"mode,attr"`
NodeSet string `xml:"nodeset,attr"`
}
type NumaTuneMemory struct {
Mode string `xml:"mode,attr"`
NodeSet string `xml:"nodeset,attr"`
}
type CPUTuneVCPUPin struct {
VCPU uint32 `xml:"vcpu,attr"`
CPUSet string `xml:"cpuset,attr"`
}
type CPUTuneIOThreadPin struct {
IOThread uint32 `xml:"iothread,attr"`
CPUSet string `xml:"cpuset,attr"`
}
type CPUEmulatorPin struct {
CPUSet string `xml:"cpuset,attr"`
}
type VCPU struct {
Placement string `xml:"placement,attr"`
CPUs uint32 `xml:",chardata"`
}
type VCPUsVCPU struct {
ID uint32 `xml:"id,attr"`
Enabled string `xml:"enabled,attr,omitempty"`
Hotpluggable string `xml:"hotpluggable,attr,omitempty"`
Order uint32 `xml:"order,attr,omitempty"`
}
type VCPUs struct {
VCPU []VCPUsVCPU `xml:"vcpu"`
}
type CPU struct {
Mode string `xml:"mode,attr,omitempty"`
Model string `xml:"model,omitempty"`
Features []CPUFeature `xml:"feature"`
Topology *CPUTopology `xml:"topology"`
NUMA *NUMA `xml:"numa,omitempty"`
}
type NUMA struct {
Cells []NUMACell `xml:"cell"`
}
type NUMACell struct {
ID string `xml:"id,attr"`
CPUs string `xml:"cpus,attr"`
Memory uint64 `xml:"memory,attr,omitempty"`
Unit string `xml:"unit,attr,omitempty"`
MemoryAccess string `xml:"memAccess,attr,omitempty"`
}
type CPUFeature struct {
Name string `xml:"name,attr"`
Policy string `xml:"policy,attr,omitempty"`
}
type CPUTopology struct {
Sockets uint32 `xml:"sockets,attr,omitempty"`
Cores uint32 `xml:"cores,attr,omitempty"`
Threads uint32 `xml:"threads,attr,omitempty"`
}
type Features struct {
ACPI *FeatureEnabled `xml:"acpi,omitempty"`
APIC *FeatureEnabled `xml:"apic,omitempty"`
Hyperv *FeatureHyperv `xml:"hyperv,omitempty"`
SMM *FeatureEnabled `xml:"smm,omitempty"`
KVM *FeatureKVM `xml:"kvm,omitempty"`
PVSpinlock *FeaturePVSpinlock `xml:"pvspinlock,omitempty"`
PMU *FeatureState `xml:"pmu,omitempty"`
VMPort *FeatureState `xml:"vmport,omitempty"`
}
const HypervModePassthrough = "passthrough"
type FeatureHyperv struct {
Mode string `xml:"mode,attr,omitempty"`
Relaxed *FeatureState `xml:"relaxed,omitempty"`
VAPIC *FeatureState `xml:"vapic,omitempty"`
Spinlocks *FeatureSpinlocks `xml:"spinlocks,omitempty"`
VPIndex *FeatureState `xml:"vpindex,omitempty"`
Runtime *FeatureState `xml:"runtime,omitempty"`
SyNIC *FeatureState `xml:"synic,omitempty"`
SyNICTimer *SyNICTimer `xml:"stimer,omitempty"`
Reset *FeatureState `xml:"reset,omitempty"`
VendorID *FeatureVendorID `xml:"vendor_id,omitempty"`
Frequencies *FeatureState `xml:"frequencies,omitempty"`
Reenlightenment *FeatureState `xml:"reenlightenment,omitempty"`
TLBFlush *FeatureState `xml:"tlbflush,omitempty"`
IPI *FeatureState `xml:"ipi,omitempty"`
EVMCS *FeatureState `xml:"evmcs,omitempty"`
}
type FeatureSpinlocks struct {
State string `xml:"state,attr,omitempty"`
Retries *uint32 `xml:"retries,attr,omitempty"`
}
type SyNICTimer struct {
Direct *FeatureState `xml:"direct,omitempty"`
State string `xml:"state,attr,omitempty"`
}
type FeaturePVSpinlock struct {
State string `xml:"state,attr,omitempty"`
}
type FeatureVendorID struct {
State string `xml:"state,attr,omitempty"`
Value string `xml:"value,attr,omitempty"`
}
type FeatureEnabled struct {
}
type Shareable struct{}
type Slice struct {
Slice SliceType `xml:"slice,omitempty"`
}
type SliceType struct {
Type string `xml:"type,attr"`
Offset int64 `xml:"offset,attr"`
Size int64 `xml:"size,attr"`
}
type FeatureState struct {
State string `xml:"state,attr,omitempty"`
}
type FeatureKVM struct {
Hidden *FeatureState `xml:"hidden,omitempty"`
HintDedicated *FeatureState `xml:"hint-dedicated,omitempty"`
}
type Metadata struct {
// KubeVirt contains kubevirt related metadata
// Note: libvirt accepts only one element at the metadata root for a given namespace
KubeVirt KubeVirtMetadata `xml:"http://kubevirt.io kubevirt"`
}
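// With Go's encoding/xml, the namespaced field tag above marshals (uid value
// illustrative) to:
//
//	<metadata>
//	  <kubevirt xmlns="http://kubevirt.io">
//	    <uid>a1b2c3</uid>
//	  </kubevirt>
//	</metadata>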
type KubeVirtMetadata struct {
UID types.UID `xml:"uid"`
GracePeriod *GracePeriodMetadata `xml:"graceperiod,omitempty"`
Migration *MigrationMetadata `xml:"migration,omitempty"`
Backup *BackupMetadata `xml:"backup,omitempty"`
AccessCredential *AccessCredentialMetadata `xml:"accessCredential,omitempty"`
MemoryDump *MemoryDumpMetadata `xml:"memoryDump,omitempty"`
}
type AccessCredentialMetadata struct {
Succeeded bool `xml:"succeeded,omitempty"`
Message string `xml:"message,omitempty"`
}
type MemoryDumpMetadata struct {
FileName string `xml:"fileName,omitempty"`
StartTimestamp *metav1.Time `xml:"startTimestamp,omitempty"`
EndTimestamp *metav1.Time `xml:"endTimestamp,omitempty"`
Completed bool `xml:"completed,omitempty"`
Failed bool `xml:"failed,omitempty"`
FailureReason string `xml:"failureReason,omitempty"`
}
type MigrationMetadata struct {
UID types.UID `xml:"uid,omitempty"`
StartTimestamp *metav1.Time `xml:"startTimestamp,omitempty"`
EndTimestamp *metav1.Time `xml:"endTimestamp,omitempty"`
Failed bool `xml:"failed,omitempty"`
FailureReason string `xml:"failureReason,omitempty"`
AbortStatus string `xml:"abortStatus,omitempty"`
Mode v1.MigrationMode `xml:"mode,omitempty"`
}
type BackupMetadata struct {
Name string `xml:"name,omitempty"`
SkipQuiesce bool `xml:"skipQuiesce,omitempty"`
StartTimestamp *metav1.Time `xml:"startTimestamp,omitempty"`
EndTimestamp *metav1.Time `xml:"endTimestamp,omitempty"`
Completed bool `xml:"completed,omitempty"`
BackupMsg string `xml:"backupMsg,omitempty"`
CheckpointName string `xml:"checkpointName,omitempty"`
}
type GracePeriodMetadata struct {
DeletionGracePeriodSeconds int64 `xml:"deletionGracePeriodSeconds"`
DeletionTimestamp *metav1.Time `xml:"deletionTimestamp,omitempty"`
MarkedForGracefulShutdown *bool `xml:"markedForGracefulShutdown,omitempty"`
}
// DomainBackup mirroring libvirt XML under https://libvirt.org/formatbackup.html#backup-xml-format
type DomainBackup struct {
XMLName xml.Name `xml:"domainbackup"`
Mode string `xml:"mode,attr"`
Incremental *string `xml:"incremental,omitempty"`
BackupDisks *BackupDisks `xml:"disks"`
}
type BackupDisks struct {
Disks []BackupDisk `xml:"disk"`
}
type BackupDisk struct {
Name string `xml:"name,attr"`
Backup string `xml:"backup,attr"`
Type string `xml:"type,attr,omitempty"`
Target *BackupTarget `xml:"target,omitempty"`
}
type BackupTarget struct {
File string `xml:"file,attr,omitempty"`
}
// DomainCheckpoint mirroring libvirt XML under https://libvirt.org/formatcheckpoint.html#checkpoint-xml
type DomainCheckpoint struct {
XMLName xml.Name `xml:"domaincheckpoint"`
Name string `xml:"name"`
CheckpointDisks *CheckpointDisks `xml:"disks"`
CreationTime *uint64 `xml:"creationTime"`
Parent *CheckpointParent `xml:"parent"`
}
type CheckpointDisks struct {
Disks []CheckpointDisk `xml:"disk"`
}
type CheckpointDisk struct {
Name string `xml:"name,attr"`
Checkpoint string `xml:"checkpoint,attr"`
}
type CheckpointParent struct {
Name string `xml:"name"`
}
type Commandline struct {
QEMUEnv []Env `xml:"qemu:env,omitempty"`
QEMUArg []Arg `xml:"qemu:arg,omitempty"`
}
type Env struct {
Name string `xml:"name,attr"`
Value string `xml:"value,attr"`
}
type Arg struct {
Value string `xml:"value,attr"`
}
type Resource struct {
Partition string `xml:"partition"`
}
type Memory struct {
Value uint64 `xml:",chardata"`
Unit string `xml:"unit,attr"`
}
type MaxMemory struct {
Value uint64 `xml:",chardata"`
Unit string `xml:"unit,attr"`
Slots uint64 `xml:"slots,attr"`
}
// MemoryBacking mirroring libvirt XML under https://libvirt.org/formatdomain.html#elementsMemoryBacking
type MemoryBacking struct {
HugePages *HugePages `xml:"hugepages,omitempty"`
Source *MemoryBackingSource `xml:"source,omitempty"`
Access *MemoryBackingAccess `xml:"access,omitempty"`
Allocation *MemoryAllocation `xml:"allocation,omitempty"`
NoSharePages *NoSharePages `xml:"nosharepages,omitempty"`
}
type MemoryAllocationMode string
const (
MemoryAllocationModeImmediate MemoryAllocationMode = "immediate"
)
type MemoryAllocation struct {
Mode MemoryAllocationMode `xml:"mode,attr"`
}
type MemoryBackingSource struct {
Type string `xml:"type,attr"`
}
// HugePages mirroring libvirt XML under memoryBacking
type HugePages struct {
HugePage []HugePage `xml:"page,omitempty"`
}
// HugePage mirroring libvirt XML under hugepages
type HugePage struct {
Size string `xml:"size,attr"`
Unit string `xml:"unit,attr"`
NodeSet string `xml:"nodeset,attr"`
}
type MemoryBackingAccess struct {
Mode string `xml:"mode,attr"`
}
type NoSharePages struct {
}
type MemoryAddress struct {
Base string `xml:"base,attr"`
}
type MemoryTarget struct {
Size Memory `xml:"size"`
Requested Memory `xml:"requested"`
Current Memory `xml:"current"`
Node string `xml:"node"`
Block Memory `xml:"block"`
Address *MemoryAddress `xml:"address,omitempty"`
}
type MemoryDevice struct {
XMLName xml.Name `xml:"memory"`
Model string `xml:"model,attr"`
Target *MemoryTarget `xml:"target"`
Alias *Alias `xml:"alias,omitempty"`
Address *Address `xml:"address,omitempty"`
}
type Devices struct {
Emulator string `xml:"emulator,omitempty"`
Interfaces []Interface `xml:"interface"`
Channels []Channel `xml:"channel"`
HostDevices []HostDevice `xml:"hostdev,omitempty"`
PanicDevices []PanicDevice `xml:"panic,omitempty"`
Controllers []Controller `xml:"controller,omitempty"`
Video []Video `xml:"video"`
Graphics []Graphics `xml:"graphics"`
Ballooning *MemBalloon `xml:"memballoon,omitempty"`
Disks []Disk `xml:"disk"`
Inputs []Input `xml:"input"`
Serials []Serial `xml:"serial"`
Consoles []Console `xml:"console"`
Watchdogs []Watchdog `xml:"watchdog,omitempty"`
Rng *Rng `xml:"rng,omitempty"`
Filesystems []FilesystemDevice `xml:"filesystem,omitempty"`
Redirs []RedirectedDevice `xml:"redirdev,omitempty"`
SoundCards []SoundCard `xml:"sound,omitempty"`
TPMs []TPM `xml:"tpm,omitempty"`
VSOCK *VSOCK `xml:"vsock,omitempty"`
Memory *MemoryDevice `xml:"memory,omitempty"`
}
type PanicDevice struct {
Model *v1.PanicDeviceModel `xml:"model,attr,omitempty"`
}
type TPM struct {
Model string `xml:"model,attr"`
Backend TPMBackend `xml:"backend"`
}
type TPMBackend struct {
Type string `xml:"type,attr"`
Version string `xml:"version,attr"`
PersistentState string `xml:"persistent_state,attr,omitempty"`
}
// RedirectedDevice describes a device to be redirected
// See: https://libvirt.org/formatdomain.html#redirected-devices
type RedirectedDevice struct {
Type string `xml:"type,attr"`
Bus string `xml:"bus,attr"`
Source RedirectedDeviceSource `xml:"source"`
}
type RedirectedDeviceSource struct {
Mode string `xml:"mode,attr"`
Path string `xml:"path,attr"`
}
type FilesystemDevice struct {
Type string `xml:"type,attr"`
AccessMode string `xml:"accessMode,attr"`
Source *FilesystemSource `xml:"source,omitempty"`
Target *FilesystemTarget `xml:"target,omitempty"`
Driver *FilesystemDriver `xml:"driver,omitempty"`
Binary *FilesystemBinary `xml:"binary,omitempty"`
}
type FilesystemTarget struct {
Dir string `xml:"dir,attr,omitempty"`
}
type FilesystemSource struct {
Dir string `xml:"dir,attr"`
Socket string `xml:"socket,attr,omitempty"`
}
type FilesystemDriver struct {
Type string `xml:"type,attr"`
Queue string `xml:"queue,attr,omitempty"`
}
type FilesystemBinary struct {
Path string `xml:"path,attr,omitempty"`
Xattr string `xml:"xattr,attr,omitempty"`
Cache *FilesystemBinaryCache `xml:"cache,omitempty"`
Lock *FilesystemBinaryLock `xml:"lock,omitempty"`
}
type FilesystemBinaryCache struct {
Mode string `xml:"mode,attr,omitempty"`
}
type FilesystemBinaryLock struct {
Posix string `xml:"posix,attr,omitempty"`
Flock string `xml:"flock,attr,omitempty"`
}
// Input represents an input device, e.g. a tablet
type Input struct {
Type v1.InputType `xml:"type,attr"`
Bus v1.InputBus `xml:"bus,attr"`
Alias *Alias `xml:"alias,omitempty"`
Address *Address `xml:"address,omitempty"`
Model string `xml:"model,attr,omitempty"`
}
// BEGIN HostDevice -----------------------------
type HostDevice struct {
XMLName xml.Name `xml:"hostdev"`
Source HostDeviceSource `xml:"source"`
Type string `xml:"type,attr"`
BootOrder *BootOrder `xml:"boot,omitempty"`
Managed string `xml:"managed,attr,omitempty"`
Mode string `xml:"mode,attr,omitempty"`
Model string `xml:"model,attr,omitempty"`
Address *Address `xml:"address,omitempty"`
Alias *Alias `xml:"alias,omitempty"`
Display string `xml:"display,attr,omitempty"`
RamFB string `xml:"ramfb,attr,omitempty"`
}
type HostDeviceSource struct {
Address *Address `xml:"address,omitempty"`
}
// END HostDevice -----------------------------
// BEGIN Controller -----------------------------
// Controller represents the libvirt controller element https://libvirt.org/formatdomain.html#elementsControllers
type Controller struct {
Type string `xml:"type,attr"`
Index string `xml:"index,attr"`
Model string `xml:"model,attr,omitempty"`
Driver *ControllerDriver `xml:"driver,omitempty"`
Alias *Alias `xml:"alias,omitempty"`
Address *Address `xml:"address,omitempty"`
PCIHole64 *PCIHole64 `xml:"pcihole64,omitempty"`
}
// END Controller -----------------------------
// BEGIN ControllerDriver
type ControllerDriver struct {
IOThread *uint `xml:"iothread,attr,omitempty"`
Queues *uint `xml:"queues,attr,omitempty"`
IOMMU string `xml:"iommu,attr,omitempty"`
}
// END ControllerDriver
// BEGIN PCIHole64
type PCIHole64 struct {
Value uint `xml:",chardata"`
Unit string `xml:"unit,attr,omitempty"`
}
// END PCIHole64
// BEGIN Disk -----------------------------
type Disk struct {
Device string `xml:"device,attr"`
Snapshot string `xml:"snapshot,attr,omitempty"`
Type string `xml:"type,attr"`
Source DiskSource `xml:"source"`
Target DiskTarget `xml:"target"`
Serial string `xml:"serial,omitempty"`
Driver *DiskDriver `xml:"driver,omitempty"`
ReadOnly *ReadOnly `xml:"readonly,omitempty"`
Auth *DiskAuth `xml:"auth,omitempty"`
Alias *Alias `xml:"alias,omitempty"`
BackingStore *BackingStore `xml:"backingStore,omitempty"`
BootOrder *BootOrder `xml:"boot,omitempty"`
Address *Address `xml:"address,omitempty"`
Model string `xml:"model,attr,omitempty"`
BlockIO *BlockIO `xml:"blockio,omitempty"`
FilesystemOverhead *v1.Percent `xml:"filesystemOverhead,omitempty"`
Capacity *int64 `xml:"capacity,omitempty"`
ExpandDisksEnabled bool `xml:"expandDisksEnabled,omitempty"`
Shareable *Shareable `xml:"shareable,omitempty"`
}
type DiskAuth struct {
Username string `xml:"username,attr"`
Secret *DiskSecret `xml:"secret,omitempty"`
}
type DiskSecret struct {
Type string `xml:"type,attr"`
Usage string `xml:"usage,attr,omitempty"`
UUID string `xml:"uuid,attr,omitempty"`
}
type ReadOnly struct{}
type DiskSource struct {
Dev string `xml:"dev,attr,omitempty"`
File string `xml:"file,attr,omitempty"`
StartupPolicy string `xml:"startupPolicy,attr,omitempty"`
Protocol string `xml:"protocol,attr,omitempty"`
Name string `xml:"name,attr,omitempty"`
Host *DiskSourceHost `xml:"host,omitempty"`
Reservations *Reservations `xml:"reservations,omitempty"`
Slices []Slice `xml:"slices,omitempty"`
DataStore *DataStore `xml:"dataStore,omitempty"`
}
type DiskTarget struct {
Bus v1.DiskBus `xml:"bus,attr,omitempty"`
Device string `xml:"dev,attr,omitempty"`
Tray string `xml:"tray,attr,omitempty"`
}
type DiskDriver struct {
Cache string `xml:"cache,attr,omitempty"`
ErrorPolicy v1.DiskErrorPolicy `xml:"error_policy,attr,omitempty"`
IO v1.DriverIO `xml:"io,attr,omitempty"`
Name string `xml:"name,attr"`
Type string `xml:"type,attr"`
IOThread *uint `xml:"iothread,attr,omitempty"`
IOThreads *DiskIOThreads `xml:"iothreads"`
Queues *uint `xml:"queues,attr,omitempty"`
Discard string `xml:"discard,attr,omitempty"`
IOMMU string `xml:"iommu,attr,omitempty"`
}
type DiskIOThreads struct {
IOThread []DiskIOThread `xml:"iothread"`
}
type DiskIOThread struct {
Id uint32 `xml:"id,attr"`
}
type DiskSourceHost struct {
Name string `xml:"name,attr"`
Port string `xml:"port,attr,omitempty"`
}
type DataStore struct {
Type string `xml:"type,attr,omitempty"`
Format *DataStoreFormat `xml:"format,omitempty"`
Source *DiskSource `xml:"source,omitempty"`
}
type DataStoreFormat struct {
Type string `xml:"type,attr"`
}
type BackingStore struct {
Type string `xml:"type,attr,omitempty"`
Format *BackingStoreFormat `xml:"format,omitempty"`
Source *DiskSource `xml:"source,omitempty"`
}
type BackingStoreFormat struct {
Type string `xml:"type,attr"`
}
type BlockIO struct {
LogicalBlockSize uint `xml:"logical_block_size,attr,omitempty"`
PhysicalBlockSize uint `xml:"physical_block_size,attr,omitempty"`
DiscardGranularity *uint `xml:"discard_granularity,attr,omitempty"`
}
type Reservations struct {
Managed string `xml:"managed,attr,omitempty"`
SourceReservations *SourceReservations `xml:"source,omitempty"`
}
type SourceReservations struct {
Type string `xml:"type,attr"`
Path string `xml:"path,attr,omitempty"`
Mode string `xml:"mode,attr,omitempty"`
}
// END Disk -----------------------------
// BEGIN Serial -----------------------------
type Serial struct {
Type string `xml:"type,attr"`
Target *SerialTarget `xml:"target,omitempty"`
Source *SerialSource `xml:"source,omitempty"`
Alias *Alias `xml:"alias,omitempty"`
Log *SerialLog `xml:"log,omitempty"`
}
type SerialTarget struct {
Port *uint `xml:"port,attr,omitempty"`
}
type SerialSource struct {
Mode string `xml:"mode,attr,omitempty"`
Path string `xml:"path,attr,omitempty"`
}
type SerialLog struct {
File string `xml:"file,attr,omitempty"`
Append string `xml:"append,attr,omitempty"`
}
// END Serial -----------------------------
// BEGIN Console -----------------------------
type Console struct {
Type string `xml:"type,attr"`
Target *ConsoleTarget `xml:"target,omitempty"`
Source *ConsoleSource `xml:"source,omitempty"`
Alias *Alias `xml:"alias,omitempty"`
}
type ConsoleTarget struct {
Type *string `xml:"type,attr,omitempty"`
Port *uint `xml:"port,attr,omitempty"`
}
type ConsoleSource struct {
Mode string `xml:"mode,attr,omitempty"`
Path string `xml:"path,attr,omitempty"`
}
// END Console -----------------------------
// BEGIN Interface -----------------------------
type Interface struct {
XMLName xml.Name `xml:"interface"`
Address *Address `xml:"address,omitempty"`
Type string `xml:"type,attr"`
TrustGuestRxFilters string `xml:"trustGuestRxFilters,attr,omitempty"`
Source InterfaceSource `xml:"source"`
Target *InterfaceTarget `xml:"target,omitempty"`
Model *Model `xml:"model,omitempty"`
MAC *MAC `xml:"mac,omitempty"`
MTU *MTU `xml:"mtu,omitempty"`
BandWidth *BandWidth `xml:"bandwidth,omitempty"`
BootOrder *BootOrder `xml:"boot,omitempty"`
LinkState *LinkState `xml:"link,omitempty"`
FilterRef *FilterRef `xml:"filterref,omitempty"`
Alias *Alias `xml:"alias,omitempty"`
Driver *InterfaceDriver `xml:"driver,omitempty"`
Rom *Rom `xml:"rom,omitempty"`
ACPI *ACPI `xml:"acpi,omitempty"`
Backend *InterfaceBackend `xml:"backend,omitempty"`
PortForward []InterfacePortForward `xml:"portForward,omitempty"`
}
type InterfacePortForward struct {
Proto string `xml:"proto,attr"`
Address string `xml:"address,attr,omitempty"`
Dev string `xml:"dev,attr,omitempty"`
Ranges []InterfacePortForwardRange `xml:"range,omitempty"`
}
type InterfacePortForwardRange struct {
Start uint `xml:"start,attr"`
End uint `xml:"end,attr,omitempty"`
To uint `xml:"to,attr,omitempty"`
Exclude string `xml:"exclude,attr,omitempty"`
}
type InterfaceBackend struct {
Type string `xml:"type,attr,omitempty"`
LogFile string `xml:"logFile,attr,omitempty"`
}
type ACPI struct {
Index uint `xml:"index,attr"`
}
type InterfaceDriver struct {
Name string `xml:"name,attr"`
Queues *uint `xml:"queues,attr,omitempty"`
IOMMU string `xml:"iommu,attr,omitempty"`
}
type LinkState struct {
State string `xml:"state,attr"`
}
type BandWidth struct {
}
type BootOrder struct {
Order uint `xml:"order,attr"`
}
type MAC struct {
MAC string `xml:"address,attr"`
}
type MTU struct {
Size string `xml:"size,attr"`
}
type FilterRef struct {
Filter string `xml:"filter,attr"`
}
type InterfaceSource struct {
Network string `xml:"network,attr,omitempty"`
Device string `xml:"dev,attr,omitempty"`
Bridge string `xml:"bridge,attr,omitempty"`
Mode string `xml:"mode,attr,omitempty"`
Address *Address `xml:"address,omitempty"`
}
type Model struct {
Type string `xml:"type,attr"`
}
type InterfaceTarget struct {
Device string `xml:"dev,attr"`
Managed string `xml:"managed,attr,omitempty"`
}
type Alias struct {
name string
userDefined bool
}
// Package private, responsible for interacting with the XML and JSON marshal/unmarshal machinery
type userAliasMarshal struct {
Name string `xml:"name,attr"`
UserDefined bool `xml:"-"`
}
type Rom struct {
Enabled string `xml:"enabled,attr"`
}
func NewUserDefinedAlias(aliasName string) *Alias {
return &Alias{name: aliasName, userDefined: true}
}
func NewNonUserDefinedAlias(aliasName string) *Alias {
return &Alias{name: aliasName, userDefined: false}
}
func (alias Alias) GetName() string {
return alias.name
}
func (alias Alias) IsUserDefined() bool {
return alias.userDefined
}
func (alias Alias) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
userAlias := userAliasMarshal{Name: alias.name}
if alias.userDefined {
userAlias.Name = UserAliasPrefix + userAlias.Name
}
return e.EncodeElement(userAlias, start)
}
func (alias *Alias) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
var userAlias userAliasMarshal
err := d.DecodeElement(&userAlias, &start)
if err != nil {
return err
}
*alias = Alias{name: userAlias.Name}
if strings.HasPrefix(alias.name, UserAliasPrefix) {
alias.userDefined = true
alias.name = alias.name[len(UserAliasPrefix):]
}
return nil
}
func (alias Alias) MarshalJSON() ([]byte, error) {
userAlias := userAliasMarshal{Name: alias.name, UserDefined: alias.userDefined}
return json.Marshal(&userAlias)
}
func (alias *Alias) UnmarshalJSON(data []byte) error {
var userAlias userAliasMarshal
if err := json.Unmarshal(data, &userAlias); err != nil {
return err
}
*alias = Alias{name: userAlias.Name, userDefined: userAlias.UserDefined}
return nil
}
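// Round-trip sketch (assuming standard encoding/xml semantics; in real domain
// XML the element name comes from the enclosing field tag, e.g. <alias> inside
// a disk):
//
//	a := NewUserDefinedAlias("mydisk")
//	out, _ := xml.Marshal(a) // the name attribute is prefixed: name="ua-mydisk"
//	var back Alias
//	_ = xml.Unmarshal(out, &back)
//	// back.GetName() == "mydisk", back.IsUserDefined() == true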
// END Interface -----------------------------
//BEGIN OS --------------------
type OS struct {
Type OSType `xml:"type"`
ACPI *OSACPI `xml:"acpi,omitempty"`
SMBios *SMBios `xml:"smbios,omitempty"`
BootOrder []Boot `xml:"boot"`
BootMenu *BootMenu `xml:"bootmenu,omitempty"`
BIOS *BIOS `xml:"bios,omitempty"`
BootLoader *Loader `xml:"loader,omitempty"`
NVRam *NVRam `xml:"nvram,omitempty"`
Kernel string `xml:"kernel,omitempty"`
Initrd string `xml:"initrd,omitempty"`
KernelArgs string `xml:"cmdline,omitempty"`
}
type OSType struct {
OS string `xml:",chardata"`
Arch string `xml:"arch,attr,omitempty"`
Machine string `xml:"machine,attr,omitempty"`
}
type OSACPI struct {
Table []ACPITable `xml:"table,omitempty"`
}
type ACPITable struct {
Path string `xml:",chardata"`
Type string `xml:"type,attr,omitempty"`
}
type SMBios struct {
Mode string `xml:"mode,attr"`
}
type NVRam struct {
Template string `xml:"template,attr,omitempty"`
NVRam string `xml:",chardata"`
}
type Boot struct {
Dev string `xml:"dev,attr"`
}
type BootMenu struct {
Enable string `xml:"enable,attr"`
Timeout *uint `xml:"timeout,attr,omitempty"`
}
type Loader struct {
ReadOnly string `xml:"readonly,attr,omitempty"`
Secure string `xml:"secure,attr,omitempty"`
Type string `xml:"type,attr,omitempty"`
Path string `xml:",chardata"`
}
// TODO <bios rebootTimeout='0'/>
type BIOS struct {
UseSerial string `xml:"useserial,attr,omitempty"`
}
type SysInfo struct {
Type string `xml:"type,attr"`
System []Entry `xml:"system>entry"`
BIOS []Entry `xml:"bios>entry"`
BaseBoard []Entry `xml:"baseBoard>entry"`
Chassis []Entry `xml:"chassis>entry"`
}
type Entry struct {
Name string `xml:"name,attr"`
Value string `xml:",chardata"`
}
//END OS --------------------
//BEGIN LaunchSecurity --------------------
type LaunchSecurity struct {
Type string `xml:"type,attr"`
DHCert string `xml:"dhCert,omitempty"`
Session string `xml:"session,omitempty"`
Cbitpos string `xml:"cbitpos,omitempty"`
ReducedPhysBits string `xml:"reducedPhysBits,omitempty"`
Policy string `xml:"policy,omitempty"`
}
//END LaunchSecurity --------------------
//BEGIN Clock --------------------
type Clock struct {
Offset string `xml:"offset,attr,omitempty"`
Timezone string `xml:"timezone,attr,omitempty"`
Adjustment string `xml:"adjustment,attr,omitempty"`
Timer []Timer `xml:"timer,omitempty"`
}
type Timer struct {
Name string `xml:"name,attr"`
TickPolicy string `xml:"tickpolicy,attr,omitempty"`
Present string `xml:"present,attr,omitempty"`
Track string `xml:"track,attr,omitempty"`
Frequency string `xml:"frequency,attr,omitempty"`
}
//END Clock --------------------
//BEGIN Channel --------------------
type Channel struct {
Type string `xml:"type,attr"`
Source *ChannelSource `xml:"source,omitempty"`
Target *ChannelTarget `xml:"target,omitempty"`
}
type ChannelTarget struct {
Name string `xml:"name,attr,omitempty"`
Type string `xml:"type,attr"`
Address string `xml:"address,attr,omitempty"`
Port uint `xml:"port,attr,omitempty"`
State string `xml:"state,attr,omitempty"`
}
type ChannelSource struct {
Mode string `xml:"mode,attr"`
Path string `xml:"path,attr"`
}
//END Channel --------------------
//BEGIN Sound -------------------
type SoundCard struct {
Alias *Alias `xml:"alias,omitempty"`
Model string `xml:"model,attr"`
}
//END Sound -------------------
//BEGIN Video -------------------
type Video struct {
Model VideoModel `xml:"model"`
}
type VideoModel struct {
Type string `xml:"type,attr"`
Heads *uint `xml:"heads,attr,omitempty"`
Ram *uint `xml:"ram,attr,omitempty"`
VRam *uint `xml:"vram,attr,omitempty"`
VGAMem *uint `xml:"vgamem,attr,omitempty"`
}
type Graphics struct {
AutoPort string `xml:"autoport,attr,omitempty"`
DefaultMode string `xml:"defaultMode,attr,omitempty"`
Listen *GraphicsListen `xml:"listen,omitempty"`
PasswdValidTo string `xml:"passwdValidTo,attr,omitempty"`
Port int32 `xml:"port,attr,omitempty"`
TLSPort int `xml:"tlsPort,attr,omitempty"`
Type string `xml:"type,attr"`
}
type GraphicsListen struct {
Type string `xml:"type,attr"`
Address string `xml:"address,attr,omitempty"`
Network string `xml:"network,attr,omitempty"`
Socket string `xml:"socket,attr,omitempty"`
}
type Address struct {
Type string `xml:"type,attr"`
Domain string `xml:"domain,attr,omitempty"`
Bus string `xml:"bus,attr"`
Slot string `xml:"slot,attr,omitempty"`
Function string `xml:"function,attr,omitempty"`
Controller string `xml:"controller,attr,omitempty"`
Target string `xml:"target,attr,omitempty"`
Unit string `xml:"unit,attr,omitempty"`
UUID string `xml:"uuid,attr,omitempty"`
Device string `xml:"device,attr,omitempty"`
CSSID string `xml:"cssid,attr,omitempty"`
SSID string `xml:"ssid,attr,omitempty"`
DevNo string `xml:"devno,attr,omitempty"`
}
//END Video -------------------
//BEGIN VSOCK -------------------
type VSOCK struct {
Model string `xml:"model,attr,omitempty"`
CID CID `xml:"cid"`
}
type CID struct {
Auto string `xml:"auto,attr"`
Address uint32 `xml:"address,attr,omitempty"`
}
//END VSOCK -------------------
type Stats struct {
Period uint `xml:"period,attr"`
}
type MemBalloon struct {
Model string `xml:"model,attr"`
Stats *Stats `xml:"stats,omitempty"`
Address *Address `xml:"address,omitempty"`
Driver *MemBalloonDriver `xml:"driver,omitempty"`
FreePageReporting string `xml:"freePageReporting,attr,omitempty"`
}
type MemBalloonDriver struct {
IOMMU string `xml:"iommu,attr,omitempty"`
}
type Watchdog struct {
Model string `xml:"model,attr"`
Action string `xml:"action,attr"`
Alias *Alias `xml:"alias,omitempty"`
Address *Address `xml:"address,omitempty"`
}
// Rng represents the source of entropy from host to VM
type Rng struct {
// Model attribute specifies what type of RNG device is provided
Model string `xml:"model,attr"`
// Backend specifies the source of entropy to be used
Backend *RngBackend `xml:"backend,omitempty"`
Address *Address `xml:"address,omitempty"`
Driver *RngDriver `xml:"driver,omitempty"`
}
type RngDriver struct {
IOMMU string `xml:"iommu,attr,omitempty"`
}
// RngRate limits the rate at which entropy can be read from the source
type RngRate struct {
// Period defines how long the read period is
Period uint32 `xml:"period,attr"`
// Bytes defines how many bytes the guest may read from the entropy source
Bytes uint32 `xml:"bytes,attr"`
}
// RngBackend is the backend device used as the entropy source
type RngBackend struct {
// Model is the source model
Model string `xml:"model,attr"`
// Source specifies the source of entropy to be used
Source string `xml:",chardata"`
}
type IOThreads struct {
IOThreads uint `xml:",chardata"`
}
// TODO ballooning, rng, cpu ...
type SecretUsage struct {
Type string `xml:"type,attr"`
Target string `xml:"target,omitempty"`
}
type SecretSpec struct {
XMLName xml.Name `xml:"secret"`
Ephemeral string `xml:"ephemeral,attr"`
Private string `xml:"private,attr"`
Description string `xml:"description,omitempty"`
Usage SecretUsage `xml:"usage,omitempty"`
}
func NewMinimalDomainSpec(vmiName string) *DomainSpec {
precond.MustNotBeEmpty(vmiName)
domain := &DomainSpec{}
domain.Name = vmiName
domain.Memory = Memory{Unit: "MB", Value: 9}
domain.Devices = Devices{}
return domain
}
func NewMinimalDomain(name string) *Domain {
return NewMinimalDomainWithNS(kubev1.NamespaceDefault, name)
}
func NewMinimalDomainWithUUID(name string, uuid types.UID) *Domain {
domain := NewMinimalDomainWithNS(kubev1.NamespaceDefault, name)
domain.Spec.Metadata = Metadata{
KubeVirt: KubeVirtMetadata{
UID: uuid,
},
}
return domain
}
func NewMinimalDomainWithNS(namespace string, name string) *Domain {
domain := NewDomainReferenceFromName(namespace, name)
domain.Spec = *NewMinimalDomainSpec(namespace + "_" + name)
return domain
}
func NewDomainReferenceFromName(namespace string, name string) *Domain {
return &Domain{
Spec: DomainSpec{},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Status: DomainStatus{},
TypeMeta: metav1.TypeMeta{
APIVersion: "1.2.2",
Kind: "Domain",
},
}
}
func (d *Domain) SetState(state LifeCycle, reason StateChangeReason) {
d.Status.Status = state
d.Status.Reason = reason
}
// Required to satisfy Object interface
func (d *Domain) GetObjectKind() schema.ObjectKind {
return &d.TypeMeta
}
// Required to satisfy ObjectMetaAccessor interface
func (d *Domain) GetObjectMeta() metav1.Object {
return &d.ObjectMeta
}
// Required to satisfy Object interface
func (dl *DomainList) GetObjectKind() schema.ObjectKind {
return &dl.TypeMeta
}
// Required to satisfy ListMetaAccessor interface
func (dl *DomainList) GetListMeta() meta.List {
return &dl.ListMeta
}
// VMINamespaceKeyFunc constructs the domain name with a namespace prefix, e.g.
// namespace_name.
func VMINamespaceKeyFunc(vmi *v1.VirtualMachineInstance) string {
domName := fmt.Sprintf("%s_%s", vmi.Namespace, vmi.Name)
return domName
}
package vcpu
import (
"fmt"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/api/resource"
"kubevirt.io/client-go/log"
k8sv1 "k8s.io/api/core/v1"
v12 "kubevirt.io/api/core/v1"
v1 "kubevirt.io/kubevirt/pkg/handler-launcher-com/cmd/v1"
"kubevirt.io/kubevirt/pkg/util/hardware"
"kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
)
type VCPUPool interface {
FitCores() (tune *api.CPUTune, err error)
FitThread() (thread uint32, err error)
}
func CalculateRequestedVCPUs(cpuTopology *api.CPUTopology) uint32 {
return cpuTopology.Cores * cpuTopology.Sockets * cpuTopology.Threads
}
type cell struct {
fullCoresList [][]uint32
fragmentedCoresList []uint32
threadsPerCore int
}
// GetNotFragmentedThreads consumes threadsPerCore threads from a full core on the numa cell,
// or returns nil if the request can't be fit on the numa cell
func (c *cell) GetNotFragmentedThreads() []uint32 {
if len(c.fullCoresList) > 0 {
selected := c.fullCoresList[0][:c.threadsPerCore]
remaining := c.fullCoresList[0][c.threadsPerCore:]
if len(remaining) >= c.threadsPerCore {
c.fullCoresList[0] = remaining
} else {
c.fullCoresList = c.fullCoresList[1:]
c.fragmentedCoresList = append(c.fragmentedCoresList, remaining...)
}
return selected
}
return nil
}
// GetFragmentedThreads allocates threadsPerCore threads from the cell's fragmented cores,
// or returns nil if not enough fragmented threads remain
func (c *cell) GetFragmentedThreads() []uint32 {
if c.threadsPerCore <= len(c.fragmentedCoresList) {
selected := c.fragmentedCoresList[:c.threadsPerCore]
c.fragmentedCoresList = c.fragmentedCoresList[c.threadsPerCore:]
return selected
}
return nil
}
// GetFragmentedThreadsUpTo allocates as many threads as possible, up to the requested count,
// and returns them, even if it can only satisfy part of the request.
func (c *cell) GetFragmentedThreadsUpTo(threads int) []uint32 {
selector := threads
if threads > len(c.fragmentedCoresList) {
selector = len(c.fragmentedCoresList)
}
selected := c.fragmentedCoresList[:selector]
c.fragmentedCoresList = c.fragmentedCoresList[selector:]
return selected
}
// GetThread will first try to allocate a thread from fragmented cores
// but fall back to not fragmented cores if the request can't be satisfied otherwise
func (c *cell) GetThread() *uint32 {
if len(c.fragmentedCoresList) > 0 {
thread := c.fragmentedCoresList[0]
c.fragmentedCoresList = c.fragmentedCoresList[1:]
return &thread
} else if len(c.fullCoresList) > 0 {
thread := c.fullCoresList[0][0]
remaining := c.fullCoresList[0][1:]
if len(remaining) >= c.threadsPerCore {
c.fullCoresList[0] = remaining
} else {
c.fullCoresList = c.fullCoresList[1:]
c.fragmentedCoresList = append(c.fragmentedCoresList, remaining...)
}
return &thread
}
return nil
}
func (c *cell) IsEmpty() bool {
return len(c.fragmentedCoresList) == 0 && len(c.fullCoresList) == 0
}
type cpuPool struct {
// cells maps host threads to their cores, and cores to their numa cells
cells []*cell
// threadsPerCore is the number of vcpu threads per vcpu core
threadsPerCore int
// cores is the number of vcpu cores requested by the VMI
cores int
// allowCellCrossing allows inefficient cpu mappings where a single
// core can have threads on different host numa cells
allowCellCrossing bool
// availableThreads is the total number of host threads assigned to the pod
availableThreads int
}
func NewStrictCPUPool(requestedTopology *api.CPUTopology, nodeTopology *v1.Topology, cpuSet []int) VCPUPool {
return newCPUPool(requestedTopology, nodeTopology, cpuSet, false)
}
func NewRelaxedCPUPool(requestedTopology *api.CPUTopology, nodeTopology *v1.Topology, cpuSet []int) VCPUPool {
return newCPUPool(requestedTopology, nodeTopology, cpuSet, true)
}
func newCPUPool(requestedTopology *api.CPUTopology, nodeTopology *v1.Topology, cpuSet []int, allowCellCrossing bool) *cpuPool {
pool := &cpuPool{
threadsPerCore: int(requestedTopology.Threads),
cores: int(requestedTopology.Cores * requestedTopology.Sockets),
allowCellCrossing: allowCellCrossing,
availableThreads: len(cpuSet),
}
cores := cpuChunksToCells(cpuSet, nodeTopology)
for _, coresOnCell := range cores {
c := cell{threadsPerCore: int(requestedTopology.Threads)}
for j, core := range coresOnCell {
if len(core) >= c.threadsPerCore {
c.fullCoresList = append(c.fullCoresList, coresOnCell[j])
} else {
c.fragmentedCoresList = append(c.fragmentedCoresList, coresOnCell[j]...)
}
}
pool.cells = append(pool.cells, &c)
}
return pool
}
// cpuChunksToCells takes the allocated cpuset, determines which threads belong to which cpu and which numa
// cell, and returns an aggregated view. The first dimension of the returned array represents the numa cells. The next
// level holds the cores of the corresponding numa cell, and the innermost array contains the available threads of the core.
func cpuChunksToCells(cpuSet []int, nodeTopology *v1.Topology) (cores [][][]uint32) {
threads := map[int]struct{}{}
visited := map[uint32]struct{}{}
cores = [][][]uint32{}
for _, cpu := range cpuSet {
threads[cpu] = struct{}{}
}
for _, cell := range nodeTopology.NumaCells {
var coresOnCell [][]uint32
for _, cpu := range cell.Cpus {
if _, exists := visited[cpu.Id]; exists {
continue
}
core := []uint32{}
if len(cpu.Siblings) == 0 {
visited[cpu.Id] = struct{}{}
if _, exists := threads[int(cpu.Id)]; exists {
core = append(core, cpu.Id)
}
} else {
for _, thread := range cpu.Siblings {
visited[thread] = struct{}{}
if _, exists := threads[int(thread)]; exists {
core = append(core, thread)
}
}
}
coresOnCell = append(coresOnCell, core)
}
cores = append(cores, coresOnCell)
}
return cores
}
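// Shape sketch on a hypothetical host: cpuSet {0, 1, 2, 5} on a node with two
// numa cells, sibling pairs (0,1) and (2,3) on cell 0 and (4,5) on cell 1,
// yields:
//
//	[][][]uint32{
//		{{0, 1}, {2}}, // cell 0: one full core, one fragmented core (with threadsPerCore == 2)
//		{{5}},         // cell 1: a fragmented core
//	}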
func (p *cpuPool) FitCores() (cpuTune *api.CPUTune, err error) {
threads, remaining := p.fitCores(p.cores)
if remaining > 0 {
if p.allowCellCrossing || p.availableThreads < p.cores*p.threadsPerCore {
return nil, fmt.Errorf("not enough exclusive threads provided, could not fit %v core(s)", remaining)
} else {
return nil, fmt.Errorf("could not fit %v core(s) without crossing numa cell boundaries for individual cores", remaining)
}
}
cpuTune = &api.CPUTune{}
for idx, hostThread := range threads {
vcpupin := api.CPUTuneVCPUPin{}
vcpupin.VCPU = uint32(idx)
vcpupin.CPUSet = strconv.Itoa(int(hostThread))
cpuTune.VCPUPin = append(cpuTune.VCPUPin, vcpupin)
}
return cpuTune, nil
}
func (p *cpuPool) fitCores(coreCount int) (threads []uint32, remainingCores int) {
remainingCores = coreCount
assignedThreads, remainingCores := p.fitCPUBound(remainingCores)
threads = append(threads, assignedThreads...)
if remainingCores == 0 {
return threads, 0
}
assignedThreads, remainingCores = p.fitCellBound(remainingCores)
threads = append(threads, assignedThreads...)
if remainingCores == 0 {
return threads, 0
}
if p.allowCellCrossing {
assignedThreads, remainingCores = p.fitUnbound(remainingCores)
threads = append(threads, assignedThreads...)
if remainingCores == 0 {
return threads, 0
}
}
return threads, remainingCores
}
func (p *cpuPool) FitThread() (thread uint32, err error) {
t := p.fitThread()
if t == nil {
return 0, fmt.Errorf("no remaining unassigned threads")
}
return *t, nil
}
func fitChunk(cells []*cell, requested int, allocator func(cells []*cell, idx int) []uint32) (threads []uint32, remainingCores int) {
for idx := range cells {
for {
chunk := allocator(cells, idx)
if len(chunk) == 0 {
// go to the next cell
break
}
threads = append(threads, chunk...)
requested--
if requested == 0 {
return threads, 0
}
}
}
return threads, requested
}
func (p *cpuPool) fitCPUBound(requested int) (threads []uint32, remainingCores int) {
allocator := func(cells []*cell, idx int) []uint32 {
return cells[idx].GetNotFragmentedThreads()
}
return fitChunk(p.cells, requested, allocator)
}
func (p *cpuPool) fitCellBound(requested int) (threads []uint32, remainingCores int) {
allocator := func(cells []*cell, idx int) []uint32 {
return cells[idx].GetFragmentedThreads()
}
return fitChunk(p.cells, requested, allocator)
}
func (p *cpuPool) fitUnbound(requested int) (threads []uint32, remainingCores int) {
remainingThreads := p.threadsPerCore * requested
for _, cell := range p.cells {
for {
chunk := cell.GetFragmentedThreadsUpTo(remainingThreads)
if len(chunk) == 0 {
// go to the next cell
break
}
threads = append(threads, chunk...)
remainingThreads -= len(chunk)
if remainingThreads < 0 {
panic(fmt.Errorf("this is a bug, remainingThreads must never be below 0 but it is %v", remainingThreads))
}
if remainingThreads == 0 {
return threads, 0
}
}
}
return threads, int(float64(remainingThreads)/float64(p.threadsPerCore) + 0.5)
}
func (p *cpuPool) fitThread() (thread *uint32) {
for _, cell := range p.cells {
if cell.IsEmpty() {
continue
}
return cell.GetThread()
}
return nil
}
func GetCPUTopology(vmi *v12.VirtualMachineInstance) *api.CPUTopology {
cores := uint32(1)
threads := uint32(1)
sockets := uint32(1)
vmiCPU := vmi.Spec.Domain.CPU
if vmiCPU != nil {
if vmiCPU.Cores != 0 {
cores = vmiCPU.Cores
}
if vmiCPU.Threads != 0 {
threads = vmiCPU.Threads
}
if vmiCPU.Sockets != 0 {
sockets = vmiCPU.Sockets
}
}
// A default guest CPU topology is set in the API mutator webhook if the user provides nothing.
// However, this fallback is still required to handle situations where the webhook fails to set a default topology.
if vmiCPU == nil || (vmiCPU.Cores == 0 && vmiCPU.Sockets == 0 && vmiCPU.Threads == 0) {
// If cores, sockets, and threads are not set, take the value from the domain resource requests or limits and
// assign it to sockets, which gives the best performance (https://bugzilla.redhat.com/show_bug.cgi?id=1653453)
resources := vmi.Spec.Domain.Resources
if cpuLimit, ok := resources.Limits[k8sv1.ResourceCPU]; ok {
sockets = uint32(cpuLimit.Value())
} else if cpuRequests, ok := resources.Requests[k8sv1.ResourceCPU]; ok {
sockets = uint32(cpuRequests.Value())
}
}
return &api.CPUTopology{
Sockets: sockets,
Cores: cores,
Threads: threads,
}
}
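// Sketch with assumed inputs: a VMI with spec.domain.cpu = {Cores: 2, Threads: 2}
// yields &api.CPUTopology{Sockets: 1, Cores: 2, Threads: 2}; a VMI with no cpu
// block at all and resources.requests.cpu = "4" falls back to
// &api.CPUTopology{Sockets: 4, Cores: 1, Threads: 1}.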
func QuantityToByte(quantity resource.Quantity) (api.Memory, error) {
memorySize, isInt := quantity.AsInt64()
if !isInt {
memorySize = quantity.Value() - 1
}
if memorySize < 0 {
return api.Memory{Unit: "b"}, fmt.Errorf("memory size '%s' must be greater than or equal to 0", quantity.String())
}
return api.Memory{
Value: uint64(memorySize),
Unit: "b",
}, nil
}
func QuantityToMebiByte(quantity resource.Quantity) (uint64, error) {
bytes, err := QuantityToByte(quantity)
if err != nil {
return 0, err
}
if bytes.Value == 0 {
return 0, nil
} else if bytes.Value < 1048576 {
return 1, nil
}
return uint64(float64(bytes.Value)/1048576 + 0.5), nil
}
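// Worked example (illustrative): "1Gi" parses to an exact integer quantity, so
//
//	b, _ := QuantityToByte(resource.MustParse("1Gi"))       // {Value: 1073741824, Unit: "b"}
//	mib, _ := QuantityToMebiByte(resource.MustParse("1Gi")) // 1024
//
// Any non-zero quantity below one MiB rounds up to 1.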
func isNumaPassthrough(vmi *v12.VirtualMachineInstance) bool {
return vmi.Spec.Domain.CPU.NUMA != nil && vmi.Spec.Domain.CPU.NUMA.GuestMappingPassthrough != nil
}
func appendDomainEmulatorThreadPin(domain *api.Domain, cpuSet string) {
emulatorThreads := api.CPUEmulatorPin{
CPUSet: cpuSet,
}
domain.Spec.CPUTune.EmulatorPin = &emulatorThreads
}
func appendDomainIOThreadPin(domain *api.Domain, thread uint32, cpuset string) {
iothreadPin := api.CPUTuneIOThreadPin{}
iothreadPin.IOThread = thread
iothreadPin.CPUSet = cpuset
domain.Spec.CPUTune.IOThreadPin = append(domain.Spec.CPUTune.IOThreadPin, iothreadPin)
}
func FormatDomainIOThreadPin(vmi *v12.VirtualMachineInstance, domain *api.Domain, emulatorThreadsCPUSet string, cpuset []int) error {
if domain.Spec.IOThreads == nil {
return fmt.Errorf("domain is missing IOThreads")
}
iothreads := int(domain.Spec.IOThreads.IOThreads)
vcpus := int(CalculateRequestedVCPUs(domain.Spec.CPU.Topology))
switch {
case vmi.Spec.Domain.IOThreads != nil && *vmi.Spec.Domain.IOThreads.SupplementalPoolThreadCount > 0:
indexEmulatorThread := 0
if emulatorThreadsCPUSet != "" {
indexEmulatorThread++
}
for i := 1; i <= int(*vmi.Spec.Domain.IOThreads.SupplementalPoolThreadCount); i++ {
// The cpus for the iothreads are allocated in addition to, and aren't part of, the cpu set dedicated to the vcpu threads
cpu := vcpus + i + indexEmulatorThread
appendDomainIOThreadPin(domain, uint32(i), fmt.Sprintf("%d", cpu))
}
case vmi.IsCPUDedicated() && vmi.Spec.Domain.CPU.IsolateEmulatorThread:
// pin the IOThread on the same pCPU as the emulator thread
appendDomainIOThreadPin(domain, uint32(1), emulatorThreadsCPUSet)
case iothreads >= vcpus:
// pin an IOThread on a CPU
for thread := 1; thread <= iothreads; thread++ {
cpuset := fmt.Sprintf("%d", cpuset[thread%vcpus])
appendDomainIOThreadPin(domain, uint32(thread), cpuset)
}
default:
// otherwise, pin the IOThreads to balanced groups of cpus.
// For example, for 3 threads and 8 cpus the mapping will look like:
// thread cpus
// 1 0,1,2
// 2 3,4,5
// 3 6,7
series := vcpus % iothreads
curr := 0
for thread := 1; thread <= iothreads; thread++ {
remainder := vcpus/iothreads - 1
if thread <= series {
remainder += 1
}
end := curr + remainder
slice := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(cpuset[curr:end+1])), ","), "[]")
appendDomainIOThreadPin(domain, uint32(thread), slice)
curr = end + 1
}
}
return nil
}
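// balancedCPUSlices is an illustrative sketch, not part of the original
// source: it extracts the balanced split performed by the default branch
// above into a standalone helper that divides the vCPU set into groups whose
// sizes differ by at most one.
func balancedCPUSlices(cpuset []int, iothreads int) [][]int {
vcpus := len(cpuset)
// the first (vcpus % iothreads) groups receive one extra CPU
series := vcpus % iothreads
slices := make([][]int, 0, iothreads)
curr := 0
for thread := 1; thread <= iothreads; thread++ {
size := vcpus / iothreads
if thread <= series {
size++
}
slices = append(slices, cpuset[curr:curr+size])
curr += size
}
// for 8 CPUs and 3 threads the groups are [0 1 2] [3 4 5] [6 7]
return slices
}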
func FormatEmulatorThreadPin(cpuPool VCPUPool, vmiAnnotations map[string]string, vCPUs int64) (string, error) {
var emulatorThreads []uint32
availableThread, err := cpuPool.FitThread()
if err != nil {
e := fmt.Errorf("no CPU allocated for the emulation thread: %v", err)
log.Log.Reason(e).Error("failed to format emulation thread pin")
return "", e
}
emulatorThreads = append(emulatorThreads, availableThread)
_, emulatorThreadCompleteToEvenParityEnabled := vmiAnnotations[v12.EmulatorThreadCompleteToEvenParity]
if emulatorThreadCompleteToEvenParityEnabled &&
vCPUs%2 == 0 {
availableThread, err = cpuPool.FitThread()
if err != nil {
e := fmt.Errorf("no second CPU allocated for the emulation thread: %v", err)
log.Log.Reason(e).Error("failed to format emulation thread pin")
return "", e
}
emulatorThreads = append(emulatorThreads, availableThread)
}
return convertCPUListToCPUSet(emulatorThreads), nil
}
func AdjustDomainForTopologyAndCPUSet(domain *api.Domain, vmi *v12.VirtualMachineInstance, topology *v1.Topology, cpuset []int, useIOThreads bool) error {
var cpuPool VCPUPool
requestedTopology := &api.CPUTopology{
Sockets: domain.Spec.CPU.Topology.Sockets,
Cores: domain.Spec.CPU.Topology.Cores,
Threads: domain.Spec.CPU.Topology.Threads,
}
if vmi.Spec.Domain.CPU.MaxSockets != 0 {
disabledVCPUs := 0
for _, vcpu := range domain.Spec.VCPUs.VCPU {
if vcpu.Enabled != "yes" {
disabledVCPUs += 1
}
}
disabledSockets := uint32(disabledVCPUs) / (requestedTopology.Cores * requestedTopology.Threads)
requestedTopology.Sockets -= disabledSockets
}
if isNumaPassthrough(vmi) {
cpuPool = NewStrictCPUPool(requestedTopology, topology, cpuset)
} else {
cpuPool = NewRelaxedCPUPool(requestedTopology, topology, cpuset)
}
}
cpuTune, err := cpuPool.FitCores()
if err != nil {
log.Log.Reason(err).Error("failed to format domain cputune.")
return err
}
domain.Spec.CPUTune = cpuTune
// Add the hint-dedicated KVM feature when dedicated CPUs are requested on the amd64 architecture.
if isAMD64VMI(vmi) {
if domain.Spec.Features == nil {
domain.Spec.Features = &api.Features{}
}
if domain.Spec.Features.KVM == nil {
domain.Spec.Features.KVM = &api.FeatureKVM{}
}
domain.Spec.Features.KVM.HintDedicated = &api.FeatureState{
State: "on",
}
}
var emulatorThreadsCPUSet string
if vmi.Spec.Domain.CPU.IsolateEmulatorThread {
vCPUs := hardware.GetNumberOfVCPUs(vmi.Spec.Domain.CPU)
if emulatorThreadsCPUSet, err = FormatEmulatorThreadPin(cpuPool, vmi.Annotations, vCPUs); err != nil {
log.Log.Reason(err).Error("failed to format emulation thread pin")
return err
}
appendDomainEmulatorThreadPin(domain, emulatorThreadsCPUSet)
}
if useIOThreads {
if err := FormatDomainIOThreadPin(vmi, domain, emulatorThreadsCPUSet, cpuset); err != nil {
log.Log.Reason(err).Error("failed to format domain iothread pinning.")
return err
}
}
if vmi.IsRealtimeEnabled() {
// RT settings
// To be configured via the manifest:
// - CPU model: host-passthrough
// - VCPU (placement type and number)
// - VCPU pinning (DedicatedCPUPlacement)
// - the USB controller should be disabled if no USB input device is found
// - memory ballooning can be disabled by setting 'autoattachMemBalloon' to false
// Changes to the vcpu scheduling and priorities are performed by the virt-handler to allow
// workloads that run without CAP_SYS_NICE to work as well as with CAP_SYS_NICE.
domain.Spec.Features.PMU = &api.FeatureState{State: "off"}
}
if isNumaPassthrough(vmi) {
if err := numaMapping(vmi, &domain.Spec, topology); err != nil {
log.Log.Reason(err).Error("failed to calculate passed through NUMA topology.")
return err
}
}
return nil
}
func convertCPUListToCPUSet(allocatedCPUs []uint32) string {
const delimiter = ","
var allocatedCPUsString []string
for _, cpu := range allocatedCPUs {
allocatedCPUsString = append(allocatedCPUsString, strconv.Itoa(int(cpu)))
}
return strings.Join(allocatedCPUsString, delimiter)
}
func cpuToCell(topology *v1.Topology) map[uint32]*v1.Cell {
cpumap := map[uint32]*v1.Cell{}
for i, cell := range topology.NumaCells {
for _, cpu := range cell.Cpus {
cpumap[cpu.Id] = topology.NumaCells[i]
}
}
return cpumap
}
func involvedCells(cpumap map[uint32]*v1.Cell, cpuTune *api.CPUTune) (map[uint32][]uint32, error) {
numamap := map[uint32][]uint32{}
for _, tune := range cpuTune.VCPUPin {
cpu, err := strconv.ParseInt(tune.CPUSet, 10, 32)
if err != nil {
return nil, fmt.Errorf("expected only full cpu to be mapped, but got %v: %v", tune.CPUSet, err)
}
if _, exists := cpumap[uint32(cpu)]; !exists {
return nil, fmt.Errorf("vcpu %v is mapped to a not existing host cpu set %v", tune.VCPU, tune.CPUSet)
}
numamap[cpumap[uint32(cpu)].Id] = append(numamap[cpumap[uint32(cpu)].Id], tune.VCPU)
}
return numamap, nil
}
func GetVirtualMemory(vmi *v12.VirtualMachineInstance) *resource.Quantity {
// If guest memory is explicitly set, return it
if vmi.Spec.Domain.Memory != nil && vmi.Spec.Domain.Memory.Guest != nil {
return vmi.Spec.Domain.Memory.Guest
}
// Get the requested memory
reqMemory, isReqMemSet := vmi.Spec.Domain.Resources.Requests[k8sv1.ResourceMemory]
// Fall back to the memory limit when it is set and no memory request was made
if v, ok := vmi.Spec.Domain.Resources.Limits[k8sv1.ResourceMemory]; ok && !isReqMemSet {
return &v
}
// Otherwise return the requested memory
return &reqMemory
}
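// exampleGetVirtualMemoryPrecedence is an illustrative sketch, not part of
// the original source: it shows the precedence implemented by
// GetVirtualMemory. The VMI literal below is hypothetical.
func exampleGetVirtualMemoryPrecedence() {
vmi := &v12.VirtualMachineInstance{}
vmi.Spec.Domain.Resources.Requests = k8sv1.ResourceList{
k8sv1.ResourceMemory: resource.MustParse("1Gi"),
}
guest := resource.MustParse("2Gi")
vmi.Spec.Domain.Memory = &v12.Memory{Guest: &guest}
// prints "2Gi": explicit guest memory wins over the request
fmt.Println(GetVirtualMemory(vmi).String())
}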
// numaMapping maps NUMA nodes based on the already applied VCPU pinning. The result is stable
// with respect to the order of the provided host NUMA nodes.
func numaMapping(vmi *v12.VirtualMachineInstance, domain *api.DomainSpec, topology *v1.Topology) error {
if topology == nil || len(topology.NumaCells) == 0 {
// If no NUMA topology is reported, do nothing.
// This also means that emulated NUMA (e.g. for memfd) is kept intact.
return nil
}
cpumap := cpuToCell(topology)
numamap, err := involvedCells(cpumap, domain.CPUTune)
if err != nil {
return fmt.Errorf("failed to generate numa pinning information: %v", err)
}
var involvedCellIDs []string
for _, cell := range topology.NumaCells {
if _, exists := numamap[cell.Id]; exists {
involvedCellIDs = append(involvedCellIDs, strconv.Itoa(int(cell.Id)))
}
}
domain.CPU.NUMA = &api.NUMA{}
domain.NUMATune = &api.NUMATune{
Memory: api.NumaTuneMemory{
Mode: "strict",
NodeSet: strings.Join(involvedCellIDs, ","),
},
}
hugepagesSize, hugepagesUnit, hugepagesEnabled, err := hugePagesInfo(vmi, domain)
if err != nil {
return fmt.Errorf("failed to determine if hugepages are enabled: %v", err)
} else if !hugepagesEnabled {
return fmt.Errorf("passing through a numa topology is restricted to VMIs with hugepages enabled")
}
domain.MemoryBacking.Allocation = &api.MemoryAllocation{Mode: api.MemoryAllocationModeImmediate}
memory, err := QuantityToByte(*GetVirtualMemory(vmi))
if err != nil {
return fmt.Errorf("could not convert VMI memory to quantity: %v", err)
}
memoryBytes := memory.Value
var mod uint64
cellCount := uint64(len(involvedCellIDs))
if memoryBytes < cellCount*hugepagesSize {
return fmt.Errorf("not enough memory requested to allocate at least one hugepage per numa node: %v < %v", memory, cellCount*(hugepagesSize*1024*1024))
} else if memoryBytes%hugepagesSize != 0 {
return fmt.Errorf("requested memory can't be divided through the numa page size: %v mod %v != 0", memory, hugepagesSize)
}
// mod is the number of whole hugepages that cannot be spread evenly across
// all cells; set them aside here and hand them back out further below.
mod = memoryBytes % (hugepagesSize * cellCount) / hugepagesSize
if mod != 0 {
memoryBytes = memoryBytes - mod*hugepagesSize
}
virtualCellID := -1
for _, cell := range topology.NumaCells {
if vcpus, exists := numamap[cell.Id]; exists {
var cpus []string
for _, cpu := range vcpus {
cpus = append(cpus, strconv.Itoa(int(cpu)))
}
virtualCellID++
domain.CPU.NUMA.Cells = append(domain.CPU.NUMA.Cells, api.NUMACell{
ID: strconv.Itoa(virtualCellID),
CPUs: strings.Join(cpus, ","),
Memory: memoryBytes / uint64(len(numamap)),
Unit: memory.Unit,
})
domain.NUMATune.MemNodes = append(domain.NUMATune.MemNodes, api.MemNode{
CellID: uint32(virtualCellID),
Mode: "strict",
NodeSet: strconv.Itoa(int(cell.Id)),
})
domain.MemoryBacking.HugePages.HugePage = append(domain.MemoryBacking.HugePages.HugePage, api.HugePage{
Size: strconv.Itoa(int(hugepagesSize)),
Unit: hugepagesUnit,
NodeSet: strconv.Itoa(virtualCellID),
})
}
}
if mod > 0 {
// distribute the remaining hugepages, one each, over the first cells
for i := range domain.CPU.NUMA.Cells[:mod] {
domain.CPU.NUMA.Cells[i].Memory += hugepagesSize
}
}
if vmi.IsRealtimeEnabled() {
// RT settings when hugepages are enabled
domain.MemoryBacking.NoSharePages = &api.NoSharePages{}
}
return nil
}
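// exampleNumaMemoryShares is an illustrative sketch, not part of the original
// source: it shows how numaMapping spreads guest memory over the involved
// cells. Every cell gets an equal share rounded down to whole hugepages, and
// the remainder is handed out one hugepage at a time, starting from the first
// cell.
func exampleNumaMemoryShares(memoryBytes, hugepagesSize uint64, cells int) []uint64 {
mod := memoryBytes % (hugepagesSize * uint64(cells)) / hugepagesSize
base := (memoryBytes - mod*hugepagesSize) / uint64(cells)
shares := make([]uint64, cells)
for i := range shares {
shares[i] = base
if uint64(i) < mod {
shares[i] += hugepagesSize
}
}
return shares
}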
func hugePagesInfo(vmi *v12.VirtualMachineInstance, domain *api.DomainSpec) (size uint64, unit string, enabled bool, err error) {
if domain.MemoryBacking != nil && domain.MemoryBacking.HugePages != nil {
if vmi.Spec.Domain.Memory.Hugepages != nil {
quantity, err := resource.ParseQuantity(vmi.Spec.Domain.Memory.Hugepages.PageSize)
if err != nil {
return 0, "", false, fmt.Errorf("could not parse hugepage value %v: %v", vmi.Spec.Domain.Memory.Hugepages.PageSize, err)
}
size, err := QuantityToByte(quantity)
if err != nil {
return 0, "b", false, fmt.Errorf("could not convert page size to MiB %v: %v", vmi.Spec.Domain.Memory.Hugepages.PageSize, err)
}
return size.Value, "b", true, nil
}
}
return 0, "b", false, nil
}
func isAMD64VMI(vmi *v12.VirtualMachineInstance) bool {
return vmi.Spec.Architecture == "amd64"
}
package apply
import (
"context"
"fmt"
"reflect"
"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
)
func (r *Reconciler) createOrUpdateValidatingWebhookConfigurations(caBundle []byte) error {
for _, webhook := range r.targetStrategy.ValidatingWebhookConfigurations() {
err := r.createOrUpdateValidatingWebhookConfiguration(webhook, caBundle)
if err != nil {
return err
}
}
return nil
}
func convertV1ValidatingWebhookToV1beta1(from *admissionregistrationv1.ValidatingWebhookConfiguration) (*admissionregistrationv1beta1.ValidatingWebhookConfiguration, error) {
var b []byte
b, err := from.Marshal()
if err != nil {
return nil, err
}
webhookv1beta1 := &admissionregistrationv1beta1.ValidatingWebhookConfiguration{}
if err = webhookv1beta1.Unmarshal(b); err != nil {
return nil, err
}
return webhookv1beta1, nil
}
func convertV1beta1ValidatingWebhookToV1(from *admissionregistrationv1beta1.ValidatingWebhookConfiguration) (*admissionregistrationv1.ValidatingWebhookConfiguration, error) {
var b []byte
b, err := from.Marshal()
if err != nil {
return nil, err
}
webhookv1 := &admissionregistrationv1.ValidatingWebhookConfiguration{}
if err = webhookv1.Unmarshal(b); err != nil {
return nil, err
}
return webhookv1, nil
}
func convertV1MutatingWebhookToV1beta1(from *admissionregistrationv1.MutatingWebhookConfiguration) (*admissionregistrationv1beta1.MutatingWebhookConfiguration, error) {
var b []byte
b, err := from.Marshal()
if err != nil {
return nil, err
}
webhookv1beta1 := &admissionregistrationv1beta1.MutatingWebhookConfiguration{}
if err = webhookv1beta1.Unmarshal(b); err != nil {
return nil, err
}
return webhookv1beta1, nil
}
func convertV1beta1MutatingWebhookToV1(from *admissionregistrationv1beta1.MutatingWebhookConfiguration) (*admissionregistrationv1.MutatingWebhookConfiguration, error) {
var b []byte
b, err := from.Marshal()
if err != nil {
return nil, err
}
webhookv1 := &admissionregistrationv1.MutatingWebhookConfiguration{}
if err = webhookv1.Unmarshal(b); err != nil {
return nil, err
}
return webhookv1, nil
}
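// convertViaWireFormat is an illustrative sketch, not part of the original
// source: all four converters above rely on the fact that the v1 and v1beta1
// admissionregistration types share a protobuf wire format, so a
// Marshal/Unmarshal round trip performs the conversion. The shared pattern
// looks like this:
func convertViaWireFormat(from interface{ Marshal() ([]byte, error) }, to interface{ Unmarshal([]byte) error }) error {
b, err := from.Marshal()
if err != nil {
return err
}
return to.Unmarshal(b)
}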
func (r *Reconciler) patchValidatingWebhookConfiguration(webhook *admissionregistrationv1.ValidatingWebhookConfiguration, patchBytes []byte) (patchedWebhook *admissionregistrationv1.ValidatingWebhookConfiguration, err error) {
switch webhook.APIVersion {
case admissionregistrationv1.SchemeGroupVersion.Version, admissionregistrationv1.SchemeGroupVersion.String():
patchedWebhook, err = r.clientset.AdmissionregistrationV1().ValidatingWebhookConfigurations().Patch(context.Background(), webhook.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
case admissionregistrationv1beta1.SchemeGroupVersion.Version, admissionregistrationv1beta1.SchemeGroupVersion.String():
var out *admissionregistrationv1beta1.ValidatingWebhookConfiguration
out, err = r.clientset.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Patch(context.Background(), webhook.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return
}
patchedWebhook, err = convertV1beta1ValidatingWebhookToV1(out)
default:
err = fmt.Errorf("ValidatingWebhookConfiguration APIVersion %s not supported", webhook.APIVersion)
}
return
}
func (r *Reconciler) createValidatingWebhookConfiguration(webhook *admissionregistrationv1.ValidatingWebhookConfiguration) (createdWebhook *admissionregistrationv1.ValidatingWebhookConfiguration, err error) {
switch webhook.APIVersion {
case admissionregistrationv1.SchemeGroupVersion.Version, admissionregistrationv1.SchemeGroupVersion.String():
createdWebhook, err = r.clientset.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(context.Background(), webhook, metav1.CreateOptions{})
case admissionregistrationv1beta1.SchemeGroupVersion.Version, admissionregistrationv1beta1.SchemeGroupVersion.String():
var webhookv1beta1 *admissionregistrationv1beta1.ValidatingWebhookConfiguration
webhookv1beta1, err = convertV1ValidatingWebhookToV1beta1(webhook)
if err != nil {
return nil, err
}
webhookv1beta1, err = r.clientset.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(context.Background(), webhookv1beta1, metav1.CreateOptions{})
if err != nil {
return nil, err
}
createdWebhook, err = convertV1beta1ValidatingWebhookToV1(webhookv1beta1)
default:
err = fmt.Errorf("ValidatingWebhookConfiguration APIVersion %s not supported", webhook.APIVersion)
}
return
}
func (r *Reconciler) patchMutatingWebhookConfiguration(webhook *admissionregistrationv1.MutatingWebhookConfiguration, patchBytes []byte) (patchedWebhook *admissionregistrationv1.MutatingWebhookConfiguration, err error) {
switch webhook.APIVersion {
case admissionregistrationv1.SchemeGroupVersion.Version, admissionregistrationv1.SchemeGroupVersion.String():
patchedWebhook, err = r.clientset.AdmissionregistrationV1().MutatingWebhookConfigurations().Patch(context.Background(), webhook.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
case admissionregistrationv1beta1.SchemeGroupVersion.Version, admissionregistrationv1beta1.SchemeGroupVersion.String():
var out *admissionregistrationv1beta1.MutatingWebhookConfiguration
out, err = r.clientset.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Patch(context.Background(), webhook.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return
}
patchedWebhook, err = convertV1beta1MutatingWebhookToV1(out)
default:
err = fmt.Errorf("MutatingWebhookConfiguration APIVersion %s not supported", webhook.APIVersion)
}
return
}
func (r *Reconciler) createMutatingWebhookConfiguration(webhook *admissionregistrationv1.MutatingWebhookConfiguration) (createdWebhook *admissionregistrationv1.MutatingWebhookConfiguration, err error) {
switch webhook.APIVersion {
case admissionregistrationv1.SchemeGroupVersion.Version, admissionregistrationv1.SchemeGroupVersion.String():
createdWebhook, err = r.clientset.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(context.Background(), webhook, metav1.CreateOptions{})
case admissionregistrationv1beta1.SchemeGroupVersion.Version, admissionregistrationv1beta1.SchemeGroupVersion.String():
var webhookv1beta1 *admissionregistrationv1beta1.MutatingWebhookConfiguration
webhookv1beta1, err = convertV1MutatingWebhookToV1beta1(webhook)
if err != nil {
return nil, err
}
webhookv1beta1, err = r.clientset.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(context.Background(), webhookv1beta1, metav1.CreateOptions{})
if err != nil {
return nil, err
}
createdWebhook, err = convertV1beta1MutatingWebhookToV1(webhookv1beta1)
default:
err = fmt.Errorf("MutatingWebhookConfiguration APIVersion %s not supported", webhook.APIVersion)
}
return
}
func (r *Reconciler) createOrUpdateValidatingWebhookConfiguration(webhook *admissionregistrationv1.ValidatingWebhookConfiguration, caBundle []byte) error {
version, imageRegistry, id := getTargetVersionRegistryID(r.kv)
webhook = webhook.DeepCopy()
for i := range webhook.Webhooks {
webhook.Webhooks[i].ClientConfig.CABundle = caBundle
}
injectOperatorMetadata(r.kv, &webhook.ObjectMeta, version, imageRegistry, id, true)
var cachedWebhook *admissionregistrationv1.ValidatingWebhookConfiguration
var err error
obj, exists, _ := r.stores.ValidationWebhookCache.Get(webhook)
// since these objects were unmanaged in the past, reconcile and pick them up if they exist
if !exists {
cachedWebhook, err = r.clientset.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get(context.Background(), webhook.Name, metav1.GetOptions{})
if errors.IsNotFound(err) {
exists = false
} else if err != nil {
return err
} else {
exists = true
}
} else {
cachedWebhook = obj.(*admissionregistrationv1.ValidatingWebhookConfiguration)
}
certsMatch := true
if exists {
for _, wh := range cachedWebhook.Webhooks {
if !reflect.DeepEqual(wh.ClientConfig.CABundle, caBundle) {
certsMatch = false
break
}
}
}
if !exists {
r.expectations.ValidationWebhook.RaiseExpectations(r.kvKey, 1, 0)
webhook, err := r.createValidatingWebhookConfiguration(webhook)
if err != nil {
r.expectations.ValidationWebhook.LowerExpectations(r.kvKey, 1, 0)
return fmt.Errorf("unable to create validatingwebhook %+v: %v", webhook, err)
}
SetGeneration(&r.kv.Status.Generations, webhook)
return nil
}
modified := resourcemerge.BoolPtr(false)
existingCopy := cachedWebhook.DeepCopy()
expectedGeneration := GetExpectedGeneration(webhook, r.kv.Status.Generations)
resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, webhook.ObjectMeta)
// nothing changed: the metadata is identical, the generation matches and the certificates match
if !*modified && existingCopy.ObjectMeta.Generation == expectedGeneration && certsMatch {
log.Log.V(4).Infof("validatingwebhookconfiguration %v is up-to-date", webhook.GetName())
return nil
}
// Patch the existing, outdated configuration
patchBytes, err := generateWebhooksPatch(cachedWebhook.ObjectMeta.Generation, webhook.ObjectMeta, webhook.Webhooks)
if err != nil {
return err
}
webhook, err = r.patchValidatingWebhookConfiguration(webhook, patchBytes)
if err != nil {
return fmt.Errorf("unable to update validatingwebhookconfiguration %+v: %v", webhook, err)
}
SetGeneration(&r.kv.Status.Generations, webhook)
log.Log.V(2).Infof("validatingwebhoookconfiguration %v updated", webhook.Name)
return nil
}
func (r *Reconciler) createOrUpdateMutatingWebhookConfigurations(caBundle []byte) error {
for _, webhook := range r.targetStrategy.MutatingWebhookConfigurations() {
err := r.createOrUpdateMutatingWebhookConfiguration(webhook, caBundle)
if err != nil {
return err
}
}
return nil
}
func generateWebhooksPatch(generation int64, metaData metav1.ObjectMeta, webhooks interface{}) ([]byte, error) {
patchSet := patch.New(patch.WithTest("/metadata/generation", generation))
patchSet.AddOption(createLabelsAndAnnotationsPatch(&metaData)...)
patchSet.AddOption(patch.WithReplace("/webhooks", webhooks))
return patchSet.GeneratePayload()
}
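// For illustration only, not part of the original source: the payload built
// by generateWebhooksPatch is a JSON patch of roughly this shape, with
// hypothetical values. The leading test operation makes the patch fail if the
// configuration's generation changed since it was read:
//
// [
//   {"op":"test","path":"/metadata/generation","value":7},
//   {"op":"replace","path":"/metadata/labels","value":{"app.kubernetes.io/managed-by":"virt-operator"}},
//   {"op":"replace","path":"/metadata/annotations","value":{}},
//   {"op":"replace","path":"/webhooks","value":[]}
// ]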
func (r *Reconciler) createOrUpdateMutatingWebhookConfiguration(webhook *admissionregistrationv1.MutatingWebhookConfiguration, caBundle []byte) error {
version, imageRegistry, id := getTargetVersionRegistryID(r.kv)
webhook = webhook.DeepCopy()
for i := range webhook.Webhooks {
webhook.Webhooks[i].ClientConfig.CABundle = caBundle
}
injectOperatorMetadata(r.kv, &webhook.ObjectMeta, version, imageRegistry, id, true)
var cachedWebhook *admissionregistrationv1.MutatingWebhookConfiguration
var err error
obj, exists, _ := r.stores.MutatingWebhookCache.Get(webhook)
// since these objects were unmanaged in the past, reconcile and pick them up if they exist
if !exists {
cachedWebhook, err = r.clientset.AdmissionregistrationV1().MutatingWebhookConfigurations().Get(context.Background(), webhook.Name, metav1.GetOptions{})
if errors.IsNotFound(err) {
exists = false
} else if err != nil {
return err
} else {
exists = true
}
} else {
cachedWebhook = obj.(*admissionregistrationv1.MutatingWebhookConfiguration)
}
certsMatch := true
if exists {
for _, wh := range cachedWebhook.Webhooks {
if !reflect.DeepEqual(wh.ClientConfig.CABundle, caBundle) {
certsMatch = false
break
}
}
}
if !exists {
r.expectations.MutatingWebhook.RaiseExpectations(r.kvKey, 1, 0)
webhook, err := r.createMutatingWebhookConfiguration(webhook)
if err != nil {
r.expectations.MutatingWebhook.LowerExpectations(r.kvKey, 1, 0)
return fmt.Errorf("unable to create mutatingwebhook %+v: %v", webhook, err)
}
SetGeneration(&r.kv.Status.Generations, webhook)
log.Log.V(2).Infof("mutatingwebhoookconfiguration %v created", webhook.Name)
return nil
}
modified := resourcemerge.BoolPtr(false)
existingCopy := cachedWebhook.DeepCopy()
expectedGeneration := GetExpectedGeneration(webhook, r.kv.Status.Generations)
resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, webhook.ObjectMeta)
// nothing changed: the metadata is identical, the generation matches and the certificates match
if !*modified && existingCopy.ObjectMeta.Generation == expectedGeneration && certsMatch {
log.Log.V(4).Infof("mutating webhook configuration %v is up-to-date", webhook.GetName())
return nil
}
patchBytes, err := generateWebhooksPatch(cachedWebhook.ObjectMeta.Generation, webhook.ObjectMeta, webhook.Webhooks)
if err != nil {
return err
}
webhook, err = r.patchMutatingWebhookConfiguration(webhook, patchBytes)
if err != nil {
return fmt.Errorf("unable to update mutatingwebhookconfiguration %+v: %v", webhook, err)
}
SetGeneration(&r.kv.Status.Generations, webhook)
log.Log.V(2).Infof("mutatingwebhoookconfiguration %v updated", webhook.Name)
return nil
}
func (r *Reconciler) createOrUpdateValidatingAdmissionPolicyBindings() error {
if !r.config.ValidatingAdmissionPolicyBindingEnabled {
return nil
}
for _, validatingAdmissionPolicyBinding := range r.targetStrategy.ValidatingAdmissionPolicyBindings() {
err := r.createOrUpdateValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding.DeepCopy())
if err != nil {
return err
}
}
return nil
}
func (r *Reconciler) createOrUpdateValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding) error {
admissionRegistrationV1 := r.clientset.AdmissionregistrationV1()
version, imageRegistry, id := getTargetVersionRegistryID(r.kv)
injectOperatorMetadata(r.kv, &validatingAdmissionPolicyBinding.ObjectMeta, version, imageRegistry, id, true)
obj, exists, _ := r.stores.ValidatingAdmissionPolicyBindingCache.Get(validatingAdmissionPolicyBinding)
if !exists {
r.expectations.ValidatingAdmissionPolicyBinding.RaiseExpectations(r.kvKey, 1, 0)
_, err := admissionRegistrationV1.ValidatingAdmissionPolicyBindings().Create(context.Background(), validatingAdmissionPolicyBinding, metav1.CreateOptions{})
if err != nil {
r.expectations.ValidatingAdmissionPolicyBinding.LowerExpectations(r.kvKey, 1, 0)
return fmt.Errorf("unable to create validatingAdmissionPolicyBinding %+v: %v", validatingAdmissionPolicyBinding, err)
}
return nil
}
cachedValidatingAdmissionPolicyBinding := obj.(*admissionregistrationv1.ValidatingAdmissionPolicyBinding)
patchSet := patch.New()
patchSet.AddOption(getObjectMetaPatch(validatingAdmissionPolicyBinding.ObjectMeta,
cachedValidatingAdmissionPolicyBinding.ObjectMeta)...)
if !equality.Semantic.DeepEqual(cachedValidatingAdmissionPolicyBinding.Spec, validatingAdmissionPolicyBinding.Spec) {
patchSet.AddOption(patch.WithReplace("/spec", validatingAdmissionPolicyBinding.Spec))
}
if patchSet.IsEmpty() {
log.Log.V(4).Infof("validatingAdmissionPolicyBinding %v is up-to-date", validatingAdmissionPolicyBinding.GetName())
return nil
}
p, err := patchSet.GeneratePayload()
if err != nil {
return fmt.Errorf("unable to generate validatingAdmissionPolicyBinding patch operations for %+v: %v", validatingAdmissionPolicyBinding, err)
}
_, err = admissionRegistrationV1.ValidatingAdmissionPolicyBindings().Patch(context.Background(),
validatingAdmissionPolicyBinding.Name,
types.JSONPatchType,
p,
metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("unable to patch validatingAdmissionPolicyBinding %+v: %v", validatingAdmissionPolicyBinding, err)
}
log.Log.V(2).Infof("validatingAdmissionPolicyBinding %v patched", validatingAdmissionPolicyBinding.GetName())
return nil
}
func (r *Reconciler) createOrUpdateValidatingAdmissionPolicies() error {
if !r.config.ValidatingAdmissionPolicyEnabled {
return nil
}
for _, validatingAdmissionPolicy := range r.targetStrategy.ValidatingAdmissionPolicies() {
err := r.createOrUpdateValidatingAdmissionPolicy(validatingAdmissionPolicy.DeepCopy())
if err != nil {
return err
}
}
return nil
}
func (r *Reconciler) createOrUpdateValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy) error {
admissionRegistrationV1 := r.clientset.AdmissionregistrationV1()
version, imageRegistry, id := getTargetVersionRegistryID(r.kv)
injectOperatorMetadata(r.kv, &validatingAdmissionPolicy.ObjectMeta, version, imageRegistry, id, true)
obj, exists, _ := r.stores.ValidatingAdmissionPolicyCache.Get(validatingAdmissionPolicy)
if !exists {
r.expectations.ValidatingAdmissionPolicy.RaiseExpectations(r.kvKey, 1, 0)
_, err := admissionRegistrationV1.ValidatingAdmissionPolicies().Create(context.Background(), validatingAdmissionPolicy, metav1.CreateOptions{})
if err != nil {
r.expectations.ValidatingAdmissionPolicy.LowerExpectations(r.kvKey, 1, 0)
return fmt.Errorf("unable to create validatingAdmissionPolicy %+v: %v", validatingAdmissionPolicy, err)
}
return nil
}
cachedValidatingAdmissionPolicy := obj.(*admissionregistrationv1.ValidatingAdmissionPolicy)
patchSet := patch.New()
patchSet.AddOption(getObjectMetaPatch(validatingAdmissionPolicy.ObjectMeta, cachedValidatingAdmissionPolicy.ObjectMeta)...)
if !equality.Semantic.DeepEqual(cachedValidatingAdmissionPolicy.Spec, validatingAdmissionPolicy.Spec) {
patchSet.AddOption(patch.WithReplace("/spec", validatingAdmissionPolicy.Spec))
}
if patchSet.IsEmpty() {
log.Log.V(4).Infof("validatingAdmissionPolicyBinding %v is up-to-date", validatingAdmissionPolicy.GetName())
return nil
}
p, err := patchSet.GeneratePayload()
if err != nil {
return fmt.Errorf("unable to generate validatingAdmissionPolicy patch operations for %+v: %v", validatingAdmissionPolicy, err)
}
_, err = admissionRegistrationV1.ValidatingAdmissionPolicies().Patch(context.Background(), validatingAdmissionPolicy.Name, types.JSONPatchType, p, metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("unable to patch validatingAdmissionPolicy %+v: %v", validatingAdmissionPolicy, err)
}
log.Log.V(2).Infof("validatingAdmissionPolicy %v patched", validatingAdmissionPolicy.GetName())
return nil
}
package apply
import (
"context"
"fmt"
"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
apiregv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
)
func (r *Reconciler) createOrUpdateAPIServices(caBundle []byte) error {
for _, apiService := range r.targetStrategy.APIServices() {
err := r.createOrUpdateAPIService(apiService.DeepCopy(), caBundle)
if err != nil {
return err
}
}
return nil
}
func (r *Reconciler) createOrUpdateAPIService(apiService *apiregv1.APIService, caBundle []byte) error {
version, imageRegistry, id := getTargetVersionRegistryID(r.kv)
injectOperatorMetadata(r.kv, &apiService.ObjectMeta, version, imageRegistry, id, true)
apiService.Spec.CABundle = caBundle
var cachedAPIService *apiregv1.APIService
var err error
obj, exists, _ := r.stores.APIServiceCache.Get(apiService)
// since these objects were unmanaged in the past, reconcile and pick them up if they exist
if !exists {
cachedAPIService, err = r.aggregatorclient.Get(context.Background(), apiService.Name, metav1.GetOptions{})
if errors.IsNotFound(err) {
exists = false
} else if err != nil {
return err
} else {
exists = true
}
} else {
cachedAPIService = obj.(*apiregv1.APIService)
}
if !exists {
r.expectations.APIService.RaiseExpectations(r.kvKey, 1, 0)
_, err := r.aggregatorclient.Create(context.Background(), apiService, metav1.CreateOptions{})
if err != nil {
r.expectations.APIService.LowerExpectations(r.kvKey, 1, 0)
return fmt.Errorf("unable to create apiservice %+v: %v", apiService, err)
}
return nil
}
modified := resourcemerge.BoolPtr(false)
existingCopy := cachedAPIService.DeepCopy()
resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, apiService.ObjectMeta)
serviceSame := equality.Semantic.DeepEqual(cachedAPIService.Spec.Service, apiService.Spec.Service)
certsSame := equality.Semantic.DeepEqual(apiService.Spec.CABundle, cachedAPIService.Spec.CABundle)
prioritySame := cachedAPIService.Spec.VersionPriority == apiService.Spec.VersionPriority && cachedAPIService.Spec.GroupPriorityMinimum == apiService.Spec.GroupPriorityMinimum
insecureSame := cachedAPIService.Spec.InsecureSkipTLSVerify == apiService.Spec.InsecureSkipTLSVerify
// nothing changed: the metadata, service, priorities and CA bundle all match
if !*modified && serviceSame && prioritySame && insecureSame && certsSame {
log.Log.V(4).Infof("apiservice %v is up-to-date", apiService.GetName())
return nil
}
patchBytes, err := patch.New(getPatchWithObjectMetaAndSpec([]patch.PatchOption{}, &apiService.ObjectMeta, apiService.Spec)...).GeneratePayload()
if err != nil {
return err
}
_, err = r.aggregatorclient.Patch(context.Background(), apiService.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("unable to patch apiservice %+v: %v", apiService, err)
}
log.Log.V(4).Infof("apiservice %v updated", apiService.GetName())
return nil
}
package apply
import (
"context"
"fmt"
"strings"
jsonpatch "github.com/evanphx/json-patch"
"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/pointer"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/components"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/placement"
"kubevirt.io/kubevirt/pkg/virt-operator/util"
)
const (
failedUpdateDaemonSetReason = "FailedUpdate"
)
var (
daemonSetDefaultMaxUnavailable = intstr.FromInt(1)
daemonSetFastMaxUnavailable = intstr.FromString("10%")
)
type CanaryUpgradeStatus string
const (
CanaryUpgradeStatusStarted CanaryUpgradeStatus = "started"
CanaryUpgradeStatusUpgradingDaemonSet CanaryUpgradeStatus = "upgrading daemonset"
CanaryUpgradeStatusWaitingDaemonSetRollout CanaryUpgradeStatus = "waiting for daemonset rollout"
CanaryUpgradeStatusSuccessful CanaryUpgradeStatus = "successful"
CanaryUpgradeStatusFailed CanaryUpgradeStatus = "failed"
)
func (r *Reconciler) syncDeployment(origDeployment *appsv1.Deployment) (*appsv1.Deployment, error) {
kv := r.kv
deployment := origDeployment.DeepCopy()
apps := r.clientset.AppsV1()
imageTag, imageRegistry, id := getTargetVersionRegistryID(kv)
injectOperatorMetadata(kv, &deployment.ObjectMeta, imageTag, imageRegistry, id, true)
injectOperatorMetadata(kv, &deployment.Spec.Template.ObjectMeta, imageTag, imageRegistry, id, false)
placement.InjectPlacementMetadata(kv.Spec.Infra, &deployment.Spec.Template.Spec, placement.RequireControlPlanePreferNonWorker)
if kv.Spec.Infra != nil && kv.Spec.Infra.Replicas != nil {
replicas := int32(*kv.Spec.Infra.Replicas)
if deployment.Spec.Replicas == nil || *deployment.Spec.Replicas != replicas {
deployment.Spec.Replicas = &replicas
r.recorder.Eventf(deployment, corev1.EventTypeWarning, "AdvancedFeatureUse", "applying a custom number of infra replicas. This is an advanced feature that prevents "+
"auto-scaling of core kubevirt components. Please use with caution!")
}
} else if deployment.Name == components.VirtAPIName && !replicasAlreadyPatched(r.kv.Spec.CustomizeComponents.Patches, components.VirtAPIName) {
replicas, err := getDesiredApiReplicas(r.clientset)
if err != nil {
log.Log.Object(deployment).Warningf("%s", err.Error())
} else {
deployment.Spec.Replicas = pointer.P(replicas)
}
}
obj, exists, _ := r.stores.DeploymentCache.Get(deployment)
if !exists {
r.expectations.Deployment.RaiseExpectations(r.kvKey, 1, 0)
deployment, err := apps.Deployments(kv.Namespace).Create(context.Background(), deployment, metav1.CreateOptions{})
if err != nil {
r.expectations.Deployment.LowerExpectations(r.kvKey, 1, 0)
return nil, fmt.Errorf("unable to create deployment %+v: %v", deployment, err)
}
SetGeneration(&kv.Status.Generations, deployment)
return deployment, nil
}
cachedDeployment := obj.(*appsv1.Deployment)
modified := resourcemerge.BoolPtr(false)
existingCopy := cachedDeployment.DeepCopy()
expectedGeneration := GetExpectedGeneration(deployment, kv.Status.Generations)
resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, deployment.ObjectMeta)
// nothing changed: the metadata is identical, the replica count and the generation match
if !*modified &&
*existingCopy.Spec.Replicas == *deployment.Spec.Replicas &&
existingCopy.GetGeneration() == expectedGeneration {
log.Log.V(4).Infof("deployment %v is up-to-date", deployment.GetName())
return deployment, nil
}
const revisionAnnotation = "deployment.kubernetes.io/revision"
if val, ok := existingCopy.ObjectMeta.Annotations[revisionAnnotation]; ok {
if deployment.ObjectMeta.Annotations == nil {
deployment.ObjectMeta.Annotations = map[string]string{}
}
deployment.ObjectMeta.Annotations[revisionAnnotation] = val
}
ops, err := patch.New(getPatchWithObjectMetaAndSpec([]patch.PatchOption{
patch.WithTest("/metadata/generation", cachedDeployment.ObjectMeta.Generation)},
&deployment.ObjectMeta, deployment.Spec)...).GeneratePayload()
if err != nil {
return nil, err
}
deployment, err = apps.Deployments(kv.Namespace).Patch(context.Background(), deployment.Name, types.JSONPatchType, ops, metav1.PatchOptions{})
if err != nil {
return nil, fmt.Errorf("unable to update deployment %+v: %v", deployment, err)
}
SetGeneration(&kv.Status.Generations, deployment)
log.Log.V(2).Infof("deployment %v updated", deployment.GetName())
return deployment, nil
}
func setMaxUnavailable(daemonSet *appsv1.DaemonSet, maxUnavailable intstr.IntOrString) {
daemonSet.Spec.UpdateStrategy.RollingUpdate = &appsv1.RollingUpdateDaemonSet{
MaxUnavailable: &maxUnavailable,
}
}
func generateDaemonSetPatch(oldDs, newDs *appsv1.DaemonSet) ([]byte, error) {
return patch.New(
getPatchWithObjectMetaAndSpec([]patch.PatchOption{
patch.WithTest("/metadata/generation", oldDs.ObjectMeta.Generation)},
&newDs.ObjectMeta, newDs.Spec)...).GeneratePayload()
}
func (r *Reconciler) patchDaemonSet(oldDs, newDs *appsv1.DaemonSet) (*appsv1.DaemonSet, error) {
patch, err := generateDaemonSetPatch(oldDs, newDs)
if err != nil {
return nil, err
}
newDs, err = r.clientset.AppsV1().DaemonSets(r.kv.Namespace).Patch(
context.Background(),
newDs.Name,
types.JSONPatchType,
patch,
metav1.PatchOptions{})
if err != nil {
return nil, fmt.Errorf("unable to update daemonset %+v: %v", oldDs, err)
}
return newDs, nil
}
func (r *Reconciler) getCanaryPods(daemonSet *appsv1.DaemonSet) []*corev1.Pod {
canaryPods := []*corev1.Pod{}
for _, obj := range r.stores.InfrastructurePodCache.List() {
pod := obj.(*corev1.Pod)
owner := metav1.GetControllerOf(pod)
if owner != nil && owner.Name == daemonSet.Name && util.PodIsUpToDate(pod, r.kv) {
canaryPods = append(canaryPods, pod)
}
}
return canaryPods
}
func (r *Reconciler) howManyUpdatedAndReadyPods(daemonSet *appsv1.DaemonSet) int32 {
var updatedReadyPods int32
for _, obj := range r.stores.InfrastructurePodCache.List() {
pod := obj.(*corev1.Pod)
owner := metav1.GetControllerOf(pod)
if owner != nil && owner.Name == daemonSet.Name && util.PodIsUpToDate(pod, r.kv) && util.PodIsReady(pod) {
updatedReadyPods++
}
}
return updatedReadyPods
}
func daemonHasDefaultRolloutStrategy(daemonSet *appsv1.DaemonSet) bool {
return getMaxUnavailable(daemonSet) == daemonSetDefaultMaxUnavailable.IntValue()
}
func (r *Reconciler) processCanaryUpgrade(cachedDaemonSet, newDS *appsv1.DaemonSet, forceUpdate bool) (bool, error, CanaryUpgradeStatus) {
var updatedAndReadyPods int32
var status CanaryUpgradeStatus
done := false
if hasTLS(cachedDaemonSet) && !hasTLS(newDS) {
insertTLS(newDS)
}
if !hasCertificateSecret(&cachedDaemonSet.Spec.Template.Spec, components.VirtHandlerCertSecretName) &&
hasCertificateSecret(&newDS.Spec.Template.Spec, components.VirtHandlerCertSecretName) {
unattachCertificateSecret(&newDS.Spec.Template.Spec, components.VirtHandlerCertSecretName)
}
log := log.Log.With("resource", fmt.Sprintf("ds/%s", cachedDaemonSet.Name))
isDaemonSetUpdated := util.DaemonSetIsUpToDate(r.kv, cachedDaemonSet) && !forceUpdate
desiredReadyPods := cachedDaemonSet.Status.DesiredNumberScheduled
if isDaemonSetUpdated {
updatedAndReadyPods = r.howManyUpdatedAndReadyPods(cachedDaemonSet)
}
switch {
case updatedAndReadyPods == 0:
if !isDaemonSetUpdated {
// start canary upgrade
setMaxUnavailable(newDS, daemonSetDefaultMaxUnavailable)
_, err := r.patchDaemonSet(cachedDaemonSet, newDS)
if err != nil {
return false, fmt.Errorf("unable to start canary upgrade for daemonset %+v: %v", newDS, err), CanaryUpgradeStatusFailed
}
log.V(2).Infof("daemonSet %v started upgrade", newDS.GetName())
} else {
// check for a crashed canary pod
canaryPods := r.getCanaryPods(cachedDaemonSet)
for _, canary := range canaryPods {
if canary != nil && util.PodIsCrashLooping(canary) {
r.recorder.Eventf(cachedDaemonSet, corev1.EventTypeWarning, failedUpdateDaemonSetReason, "daemonSet %v rollout failed", cachedDaemonSet.Name)
return false, fmt.Errorf("daemonSet %s rollout failed", cachedDaemonSet.Name), CanaryUpgradeStatusFailed
}
}
}
done, status = false, CanaryUpgradeStatusStarted
case updatedAndReadyPods > 0 && updatedAndReadyPods < desiredReadyPods:
if daemonHasDefaultRolloutStrategy(cachedDaemonSet) {
// canary was ok, start real rollout
setMaxUnavailable(newDS, daemonSetFastMaxUnavailable)
// start rollout again
_, err := r.patchDaemonSet(cachedDaemonSet, newDS)
if err != nil {
return false, fmt.Errorf("unable to update daemonset %+v: %v", newDS, err), CanaryUpgradeStatusFailed
}
log.V(2).Infof("daemonSet %v updated", newDS.GetName())
status = CanaryUpgradeStatusUpgradingDaemonSet
} else {
log.V(4).Infof("waiting for all pods of daemonSet %v to be ready", newDS.GetName())
status = CanaryUpgradeStatusWaitingDaemonSetRollout
}
done = false
case updatedAndReadyPods > 0 && updatedAndReadyPods == desiredReadyPods:
// rollout has completed and all virt-handlers are ready
if !daemonHasDefaultRolloutStrategy(cachedDaemonSet) {
// revert maxUnavailable to default value
setMaxUnavailable(newDS, daemonSetDefaultMaxUnavailable)
var err error
newDS, err = r.patchDaemonSet(cachedDaemonSet, newDS)
if err != nil {
return false, err, CanaryUpgradeStatusFailed
}
log.V(2).Infof("daemonSet %v updated back to default", newDS.GetName())
SetGeneration(&r.kv.Status.Generations, newDS)
return false, nil, CanaryUpgradeStatusWaitingDaemonSetRollout
}
if supportsTLS(cachedDaemonSet) {
if !hasTLS(cachedDaemonSet) {
insertTLS(newDS)
_, err := r.patchDaemonSet(cachedDaemonSet, newDS)
log.V(2).Infof("daemonSet %v updated to default CN TLS", newDS.GetName())
SetGeneration(&r.kv.Status.Generations, newDS)
return false, err, CanaryUpgradeStatusWaitingDaemonSetRollout
}
if hasCertificateSecret(&newDS.Spec.Template.Spec, components.VirtHandlerCertSecretName) {
unattachCertificateSecret(&newDS.Spec.Template.Spec, components.VirtHandlerCertSecretName)
var err error
cachedDaemonSet, err = r.patchDaemonSet(cachedDaemonSet, newDS)
if err != nil {
return false, err, CanaryUpgradeStatusFailed
}
log.V(2).Infof("daemonSet %v updated to secure certificates", newDS.GetName())
}
}
SetGeneration(&r.kv.Status.Generations, cachedDaemonSet)
log.V(2).Infof("daemonSet %v is ready", newDS.GetName())
done, status = true, CanaryUpgradeStatusSuccessful
}
return done, nil, status
}
func supportsTLS(daemonSet *appsv1.DaemonSet) bool {
if daemonSet.Labels == nil {
return false
}
value, ok := daemonSet.Labels[components.SupportsMigrationCNsValidation]
return ok && value == "true"
}
func insertTLS(daemonSet *appsv1.DaemonSet) {
daemonSet.Spec.Template.Spec.Containers[0].Args = append(daemonSet.Spec.Template.Spec.Containers[0].Args, "--migration-cn-types", "migration")
}
func hasTLS(daemonSet *appsv1.DaemonSet) bool {
container := &daemonSet.Spec.Template.Spec.Containers[0]
for _, arg := range container.Args {
if strings.Contains(arg, "migration-cn-types") {
return true
}
}
return false
}
func hasCertificateSecret(spec *corev1.PodSpec, secretName string) bool {
for _, volume := range spec.Volumes {
if volume.Name == secretName {
return true
}
}
return false
}
func unattachCertificateSecret(spec *corev1.PodSpec, secretName string) {
newVolumes := []corev1.Volume{}
for _, volume := range spec.Volumes {
if volume.Name != secretName {
newVolumes = append(newVolumes, volume)
}
}
spec.Volumes = newVolumes
newVolumeMounts := []corev1.VolumeMount{}
for _, volumeMount := range spec.Containers[0].VolumeMounts {
if volumeMount.Name != secretName {
newVolumeMounts = append(newVolumeMounts, volumeMount)
}
}
spec.Containers[0].VolumeMounts = newVolumeMounts
}
func getMaxUnavailable(daemonSet *appsv1.DaemonSet) int {
update := daemonSet.Spec.UpdateStrategy.RollingUpdate
if update == nil {
return 0
}
if update.MaxUnavailable != nil {
return update.MaxUnavailable.IntValue()
}
return daemonSetDefaultMaxUnavailable.IntValue()
}
func (r *Reconciler) syncDaemonSet(daemonSet *appsv1.DaemonSet) (bool, error) {
kv := r.kv
daemonSet = daemonSet.DeepCopy()
apps := r.clientset.AppsV1()
imageTag, imageRegistry, id := getTargetVersionRegistryID(kv)
injectOperatorMetadata(kv, &daemonSet.ObjectMeta, imageTag, imageRegistry, id, true)
injectOperatorMetadata(kv, &daemonSet.Spec.Template.ObjectMeta, imageTag, imageRegistry, id, false)
placement.InjectPlacementMetadata(kv.Spec.Workloads, &daemonSet.Spec.Template.Spec, placement.AnyNode)
if daemonSet.GetName() == "virt-handler" {
setMaxDevices(r.kv, daemonSet)
}
var cachedDaemonSet *appsv1.DaemonSet
obj, exists, _ := r.stores.DaemonSetCache.Get(daemonSet)
if !exists {
r.expectations.DaemonSet.RaiseExpectations(r.kvKey, 1, 0)
if supportsTLS(daemonSet) && !hasTLS(daemonSet) {
insertTLS(daemonSet)
unattachCertificateSecret(&daemonSet.Spec.Template.Spec, components.VirtHandlerCertSecretName)
}
daemonSet, err := apps.DaemonSets(kv.Namespace).Create(context.Background(), daemonSet, metav1.CreateOptions{})
if err != nil {
r.expectations.DaemonSet.LowerExpectations(r.kvKey, 1, 0)
return false, fmt.Errorf("unable to create daemonset %+v: %v", daemonSet, err)
}
SetGeneration(&kv.Status.Generations, daemonSet)
return true, nil
}
cachedDaemonSet = obj.(*appsv1.DaemonSet)
modified := resourcemerge.BoolPtr(false)
existingCopy := cachedDaemonSet.DeepCopy()
expectedGeneration := GetExpectedGeneration(daemonSet, kv.Status.Generations)
resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, daemonSet.ObjectMeta)
// nothing changed: the metadata is identical and the generation matches
if !*modified && existingCopy.GetGeneration() == expectedGeneration {
log.Log.V(4).Infof("daemonset %v is up-to-date", daemonSet.GetName())
return true, nil
}
// canary pod upgrade:
// 1. update virt-handler with maxUnavailable=1
// 2. patch the daemonSet with the new version
// 3. wait for the new virt-handler pod to become ready
// 4. set maxUnavailable=10%
// 5. restart the rollout of the new virt-handler
// 6. wait for all nodes to complete the rollout
// 7. set maxUnavailable back to 1
done, err, _ := r.processCanaryUpgrade(cachedDaemonSet, daemonSet, *modified)
return done, err
}
func setMaxDevices(kv *v1.KubeVirt, vh *appsv1.DaemonSet) {
if kv.Spec.Configuration.VirtualMachineInstancesPerNode == nil {
return
}
vh.Spec.Template.Spec.Containers[0].Command = append(vh.Spec.Template.Spec.Containers[0].Command,
"--max-devices",
fmt.Sprintf("%d", *kv.Spec.Configuration.VirtualMachineInstancesPerNode))
}
func (r *Reconciler) syncPodDisruptionBudgetForDeployment(deployment *appsv1.Deployment) error {
kv := r.kv
podDisruptionBudget := components.NewPodDisruptionBudgetForDeployment(deployment)
imageTag, imageRegistry, id := getTargetVersionRegistryID(kv)
injectOperatorMetadata(kv, &podDisruptionBudget.ObjectMeta, imageTag, imageRegistry, id, true)
pdbClient := r.clientset.PolicyV1().PodDisruptionBudgets(deployment.Namespace)
var cachedPodDisruptionBudget *policyv1.PodDisruptionBudget
obj, exists, _ := r.stores.PodDisruptionBudgetCache.Get(podDisruptionBudget)
if podDisruptionBudget.Spec.MinAvailable.IntValue() == 0 {
var err error
if exists {
err = pdbClient.Delete(context.Background(), podDisruptionBudget.Name, metav1.DeleteOptions{})
}
return err
}
if !exists {
r.expectations.PodDisruptionBudget.RaiseExpectations(r.kvKey, 1, 0)
podDisruptionBudget, err := pdbClient.Create(context.Background(), podDisruptionBudget, metav1.CreateOptions{})
if err != nil {
r.expectations.PodDisruptionBudget.LowerExpectations(r.kvKey, 1, 0)
return fmt.Errorf("unable to create poddisruptionbudget %+v: %v", podDisruptionBudget, err)
}
log.Log.V(2).Infof("poddisruptionbudget %v created", podDisruptionBudget.GetName())
SetGeneration(&kv.Status.Generations, podDisruptionBudget)
return nil
}
cachedPodDisruptionBudget = obj.(*policyv1.PodDisruptionBudget)
modified := resourcemerge.BoolPtr(false)
existingCopy := cachedPodDisruptionBudget.DeepCopy()
expectedGeneration := GetExpectedGeneration(podDisruptionBudget, kv.Status.Generations)
resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, podDisruptionBudget.ObjectMeta)
// nothing changed: the metadata, minAvailable and the generation all match
if !*modified &&
existingCopy.Spec.MinAvailable.IntValue() == podDisruptionBudget.Spec.MinAvailable.IntValue() &&
existingCopy.ObjectMeta.Generation == expectedGeneration {
log.Log.V(4).Infof("poddisruptionbudget %v is up-to-date", cachedPodDisruptionBudget.GetName())
return nil
}
patchBytes, err := patch.New(getPatchWithObjectMetaAndSpec([]patch.PatchOption{}, &podDisruptionBudget.ObjectMeta, podDisruptionBudget.Spec)...).GeneratePayload()
if err != nil {
return err
}
podDisruptionBudget, err = pdbClient.Patch(context.Background(), podDisruptionBudget.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("unable to patch/delete poddisruptionbudget %+v: %v", podDisruptionBudget, err)
}
SetGeneration(&kv.Status.Generations, podDisruptionBudget)
log.Log.V(2).Infof("poddisruptionbudget %v patched", podDisruptionBudget.GetName())
return nil
}
func getDesiredApiReplicas(clientset kubecli.KubevirtClient) (replicas int32, err error) {
nodeList, err := clientset.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", v1.NodeSchedulable, "true"),
})
if err != nil {
return 0, fmt.Errorf("failed to get number of nodes to determine virt-api replicas: %v", err)
}
nodesCount := len(nodeList.Items)
// This is a simple heuristic to achieve basic scalability so that we can run on large clusters.
// From recent experiments we know that for a 100-node cluster, 9 virt-api replicas are enough.
// This heuristic is not accurate. It could, and should, be replaced by something more sophisticated and refined
// in the future.
if nodesCount == 1 {
return 1, nil
}
const minReplicas = 2
replicas = int32(nodesCount) / 10
if replicas < minReplicas {
replicas = minReplicas
}
return replicas, nil
}
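// exampleDesiredApiReplicas is an illustrative sketch, not part of the
// original source: the heuristic above without the API round trip, for a
// quick sense of the scaling curve.
func exampleDesiredApiReplicas(nodesCount int) int32 {
if nodesCount == 1 {
return 1
}
const minReplicas = 2
replicas := int32(nodesCount) / 10
if replicas < minReplicas {
replicas = minReplicas
}
// 1 node -> 1, 10 nodes -> 2, 50 nodes -> 5, 100 nodes -> 10
return replicas
}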
func replicasAlreadyPatched(patches []v1.CustomizeComponentsPatch, deploymentName string) bool {
for _, patch := range patches {
if patch.ResourceName != deploymentName {
continue
}
decodedPatch, err := jsonpatch.DecodePatch([]byte(patch.Patch))
if err != nil {
log.Log.Warningf("%s", err.Error())
continue
}
for _, operation := range decodedPatch {
path, err := operation.Path()
if err != nil {
log.Log.Warningf("%s", err.Error())
continue
}
op := operation.Kind()
if path == "/spec/replicas" && op == "replace" {
return true
}
}
}
return false
}
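// exampleReplicasAlreadyPatched is an illustrative sketch, not part of the
// original source: a hypothetical customize-components patch that
// replicasAlreadyPatched detects, causing the operator to leave the replica
// count of virt-api untouched. The field values are assumptions for
// illustration only.
func exampleReplicasAlreadyPatched() bool {
patches := []v1.CustomizeComponentsPatch{{
ResourceName: components.VirtAPIName,
ResourceType: "Deployment",
Type: v1.JSONPatchType,
Patch: `[{"op":"replace","path":"/spec/replicas","value":5}]`,
}}
// returns true: the patch already pins /spec/replicas
return replicasAlreadyPatched(patches, components.VirtAPIName)
}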
package apply
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sv1 "kubevirt.io/api/core/v1"
)
func GetCADuration(config *k8sv1.KubeVirtSelfSignConfiguration) *metav1.Duration {
defaultDuration := &metav1.Duration{Duration: Duration7d}
if config == nil {
return defaultDuration
}
// deprecated, but takes priority to provide a smooth upgrade path
if config.CARotateInterval != nil {
return config.CARotateInterval
}
if config.CA != nil && config.CA.Duration != nil {
return config.CA.Duration
}
return defaultDuration
}
func GetCARenewBefore(config *k8sv1.KubeVirtSelfSignConfiguration) *metav1.Duration {
caDuration := GetCADuration(config)
defaultDuration := &metav1.Duration{Duration: time.Duration(float64(caDuration.Duration) * 0.2)}
if config == nil {
return defaultDuration
}
// deprecated, but takes priority to provide a smooth upgrade path
if config.CAOverlapInterval != nil {
return config.CAOverlapInterval
}
if config.CA != nil && config.CA.RenewBefore != nil {
return config.CA.RenewBefore
}
return defaultDuration
}
func GetCertDuration(config *k8sv1.KubeVirtSelfSignConfiguration) *metav1.Duration {
defaultDuration := &metav1.Duration{Duration: Duration1d}
if config == nil {
return defaultDuration
}
// deprecated, but takes priority to provide a smooth upgrade path
if config.CertRotateInterval != nil {
return config.CertRotateInterval
}
if config.Server != nil && config.Server.Duration != nil {
return config.Server.Duration
}
return defaultDuration
}
func GetCertRenewBefore(config *k8sv1.KubeVirtSelfSignConfiguration) *metav1.Duration {
certDuration := GetCertDuration(config)
defaultDuration := &metav1.Duration{Duration: time.Duration(float64(certDuration.Duration) * 0.2)}
if config == nil {
return defaultDuration
}
if config.Server != nil && config.Server.RenewBefore != nil {
return config.Server.RenewBefore
}
return defaultDuration
}
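// exampleSelfSignDefaults is an illustrative sketch, not part of the original
// source: with a nil configuration the helpers above fall back to a 7-day CA
// and a 1-day server certificate, each renewed once 20% of its lifetime
// remains (33.6h and 4.8h before expiry, respectively).
func exampleSelfSignDefaults() (caDuration, caRenewBefore, certDuration, certRenewBefore *metav1.Duration) {
return GetCADuration(nil), GetCARenewBefore(nil), GetCertDuration(nil), GetCertRenewBefore(nil)
}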
package apply
import (
"context"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"encoding/hex"
"encoding/json"
"fmt"
"strconv"
"time"
"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/util/workqueue"
"kubevirt.io/client-go/log"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/certificates/triple/cert"
"kubevirt.io/kubevirt/pkg/controller"
"kubevirt.io/kubevirt/pkg/network/multus"
"kubevirt.io/kubevirt/pkg/virt-config/featuregate"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/components"
"kubevirt.io/kubevirt/pkg/virt-operator/util"
)
func (r *Reconciler) syncKubevirtNamespaceLabels() error {
targetNamespace := r.kv.ObjectMeta.Namespace
obj, exists, err := r.stores.NamespaceCache.GetByKey(targetNamespace)
if err != nil {
log.Log.Errorf("Failed to retrieve kubevirt namespace from store. Error: %s", err.Error())
return err
}
if !exists {
return fmt.Errorf("Could not find namespace in store. Namespace key: %s", targetNamespace)
}
cachedNamespace := obj.(*corev1.Namespace)
// Prepare namespace metadata patch
targetLabels := map[string]string{
"openshift.io/cluster-monitoring": "true",
}
cachedLabels := cachedNamespace.ObjectMeta.Labels
labelsToPatch := make(map[string]string)
for targetLabelKey, targetLabelValue := range targetLabels {
cachedLabelValue, ok := cachedLabels[targetLabelKey]
if ok && cachedLabelValue == targetLabelValue {
continue
}
labelsToPatch[targetLabelKey] = targetLabelValue
}
if len(labelsToPatch) == 0 {
log.Log.Infof("Kubevirt namespace (%s) labels are in sync", targetNamespace)
return nil
}
labelsPatch, err := json.Marshal(labelsToPatch)
if err != nil {
log.Log.Errorf("Failed to marshal namespace labels: %s", err.Error())
return err
}
log.Log.Infof("Patching namespace %s with %s", targetNamespace, labelsPatch)
_, err = r.clientset.CoreV1().Namespaces().Patch(context.Background(),
targetNamespace,
types.MergePatchType,
[]byte(fmt.Sprintf(`{"metadata":{"labels": %s}}`, labelsPatch)),
metav1.PatchOptions{},
)
if err != nil {
log.Log.Errorf("Could not patch kubevirt namespace labels: %s", err.Error())
return err
}
log.Log.Infof("kubevirt namespace labels patched")
return nil
}
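// For illustration only, not part of the original source: when the monitoring
// label is missing, the merge patch sent above has this shape:
//
// {"metadata":{"labels":{"openshift.io/cluster-monitoring":"true"}}}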
func (r *Reconciler) createOrUpdateServices() (bool, error) {
for _, service := range r.targetStrategy.Services() {
pending, err := r.createOrUpdateService(service.DeepCopy())
if pending || err != nil {
return pending, err
}
}
return false, nil
}
func (r *Reconciler) createOrUpdateService(service *corev1.Service) (bool, error) {
core := r.clientset.CoreV1()
version, imageRegistry, id := getTargetVersionRegistryID(r.kv)
injectOperatorMetadata(r.kv, &service.ObjectMeta, version, imageRegistry, id, true)
obj, exists, _ := r.stores.ServiceCache.Get(service)
if !exists {
r.expectations.Service.RaiseExpectations(r.kvKey, 1, 0)
_, err := core.Services(service.Namespace).Create(context.Background(), service, metav1.CreateOptions{})
if err != nil {
r.expectations.Service.LowerExpectations(r.kvKey, 1, 0)
return false, fmt.Errorf("unable to create service %+v: %v", service, err)
}
return false, nil
}
cachedService := obj.(*corev1.Service)
deleteAndReplace := hasImmutableFieldChanged(service, cachedService)
if deleteAndReplace {
err := deleteService(cachedService, r.kvKey, r.expectations, core)
if err != nil {
return false, err
}
// waiting for old service to be deleted,
// after which the operator will recreate using new spec
return true, nil
}
patchBytes, err := generateServicePatch(cachedService, service)
if err != nil {
return false, fmt.Errorf("unable to generate service endpoint patch operations for %+v: %v", service, err)
}
if len(patchBytes) == 0 {
log.Log.V(4).Infof("service %v is up-to-date", service.GetName())
return false, nil
}
_, err = core.Services(service.Namespace).Patch(context.Background(), service.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return false, fmt.Errorf("unable to patch service %+v: %v", service, err)
}
log.Log.V(2).Infof("service %v patched", service.GetName())
return false, nil
}
func (r *Reconciler) getSecret(secret *corev1.Secret) (*corev1.Secret, bool, error) {
obj, exists, _ := r.stores.SecretCache.Get(secret)
if exists {
return obj.(*corev1.Secret), exists, nil
}
cachedSecret, err := r.clientset.CoreV1().Secrets(secret.Namespace).Get(context.Background(), secret.Name, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return nil, false, nil
}
return nil, false, err
}
return cachedSecret, true, nil
}
func certificationNeedsRotation(secret *corev1.Secret, duration *metav1.Duration, ca *tls.Certificate, renewBefore *metav1.Duration, caRenewBefore *metav1.Duration) bool {
crt, err := components.LoadCertificates(secret)
if err != nil {
log.DefaultLogger().Reason(err).Infof("Failed to load certificate from secret %s, will rotate it.", secret.Name)
return true
}
if secret.Annotations["kubevirt.io/duration"] != duration.String() {
return true
}
rotationTime := components.NextRotationDeadline(crt, ca, renewBefore, caRenewBefore)
// We update the certificate if it has passed its renewal timeout
if rotationTime.Before(time.Now()) {
return true
}
return false
}
func deleteService(service *corev1.Service, kvKey string, expectations *util.Expectations, core typedv1.CoreV1Interface) error {
if service.DeletionTimestamp != nil {
return nil
}
key, err := controller.KeyFunc(service)
if err != nil {
return err
}
gracePeriod := int64(0)
deleteOptions := metav1.DeleteOptions{
GracePeriodSeconds: &gracePeriod,
}
expectations.Service.AddExpectedDeletion(kvKey, key)
err = core.Services(service.Namespace).Delete(context.Background(), service.Name, deleteOptions)
if err != nil {
expectations.Service.DeletionObserved(kvKey, key)
log.Log.Errorf("Failed to delete service %+v: %v", service, err)
return err
}
log.Log.V(2).Infof("service %v deleted. It must be re-created", service.GetName())
return nil
}
func (r *Reconciler) createOrUpdateCertificateSecret(queue workqueue.TypedRateLimitingInterface[string], ca *tls.Certificate, secret *corev1.Secret, duration *metav1.Duration, renewBefore *metav1.Duration, caRenewBefore *metav1.Duration) (*tls.Certificate, error) {
var cachedSecret *corev1.Secret
var err error
secret = secret.DeepCopy()
version, imageRegistry, id := getTargetVersionRegistryID(r.kv)
injectOperatorMetadata(r.kv, &secret.ObjectMeta, version, imageRegistry, id, true)
log.DefaultLogger().V(4).Infof("checking certificate %v", secret.Name)
cachedSecret, exists, err := r.getSecret(secret)
if err != nil {
return nil, err
}
rotateCertificate := false
if exists {
rotateCertificate = certificationNeedsRotation(cachedSecret, duration, ca, renewBefore, caRenewBefore)
}
// populate the secret with the correct certificate
if !exists || rotateCertificate {
if err := components.PopulateSecretWithCertificate(secret, ca, duration); err != nil {
return nil, err
}
} else {
secret.Data = cachedSecret.Data
}
crt, err := components.LoadCertificates(secret)
if err != nil {
log.DefaultLogger().Reason(err).Infof("Failed to load certificate from secret %s.", secret.Name)
return nil, err
}
// we need to ensure that we revisit certificates before they expire
wakeupDeadline := time.Until(components.NextRotationDeadline(crt, ca, renewBefore, caRenewBefore))
queue.AddAfter(r.kvKey, wakeupDeadline)
if !exists {
r.expectations.Secrets.RaiseExpectations(r.kvKey, 1, 0)
_, err := r.clientset.CoreV1().Secrets(secret.Namespace).Create(context.Background(), secret, metav1.CreateOptions{})
if err != nil {
r.expectations.Secrets.LowerExpectations(r.kvKey, 1, 0)
return nil, fmt.Errorf("unable to create secret %+v: %v", secret, err)
}
return crt, nil
}
modified := resourcemerge.BoolPtr(false)
resourcemerge.EnsureObjectMeta(modified, &cachedSecret.ObjectMeta, secret.ObjectMeta)
if !*modified && !rotateCertificate {
log.Log.V(4).Infof("secret %v is up-to-date", secret.GetName())
return crt, nil
}
patchBytes, err := createSecretPatch(secret)
if err != nil {
return nil, err
}
_, err = r.clientset.CoreV1().Secrets(secret.Namespace).Patch(context.Background(), secret.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return nil, fmt.Errorf("unable to patch secret %+v: %v", secret, err)
}
log.Log.V(2).Infof("secret %v updated", secret.GetName())
return crt, nil
}
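// createSecretPatch builds the JSON patch applied above: it replaces the labels and
// annotations as well as the whole /data map. The generated payload is a JSON patch
// array; an illustrative (not verbatim) example:
//
// [
//   {"op":"replace","path":"/metadata/labels","value":{"app.kubernetes.io/managed-by":"virt-operator"}},
//   {"op":"replace","path":"/data","value":{"tls.crt":"...","tls.key":"..."}}
// ]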
func createSecretPatch(secret *corev1.Secret) ([]byte, error) {
// Add Labels and Annotations Patches
ops := createLabelsAndAnnotationsPatch(&secret.ObjectMeta)
ops = append(ops, patch.WithReplace("/data", secret.Data))
return patch.New(ops...).GeneratePayload()
}
func (r *Reconciler) createOrUpdateCertificateSecrets(queue workqueue.TypedRateLimitingInterface[string], caCert *tls.Certificate, duration *metav1.Duration, renewBefore *metav1.Duration, caRenewBefore *metav1.Duration) error {
for _, secret := range r.targetStrategy.CertificateSecrets() {
// The CA certificates are handled separately and before the other secrets; skip both the main and the export CA here
if secret.Name == components.KubeVirtCASecretName || secret.Name == components.KubeVirtExportCASecretName {
continue
}
_, err := r.createOrUpdateCertificateSecret(queue, caCert, secret, duration, renewBefore, caRenewBefore)
if err != nil {
return err
}
}
return nil
}
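// getValidCerts deduplicates the given certificates by the SHA-256 hash of their raw
// DER bytes and keeps only those that are currently within their validity window
// (after NotBefore and before NotAfter).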
func getValidCerts(certs []*x509.Certificate) []*tls.Certificate {
externalCAList := make([]*tls.Certificate, 0)
certMap := make(map[string]*x509.Certificate)
for _, cert := range certs {
hash := sha256.Sum256(cert.Raw)
hashString := hex.EncodeToString(hash[:])
if _, ok := certMap[hashString]; ok {
continue
}
tlsCert := &tls.Certificate{
Leaf: cert,
}
now := time.Now()
if now.After(cert.NotBefore) && now.Before(cert.NotAfter) {
// cert is still valid
log.Log.V(2).Infof("adding CA from external CA list because it is still valid %s, %s, now %s, not before %s, not after %s", cert.Issuer, cert.Subject, now.Format(time.RFC3339), cert.NotBefore.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
externalCAList = append(externalCAList, tlsCert)
certMap[hashString] = cert
} else if now.Before(cert.NotBefore) {
log.Log.V(2).Infof("skipping CA from external CA because it is not yet valid %s, %s", cert.Issuer, cert.Subject)
} else if now.After(cert.NotAfter) {
log.Log.V(2).Infof("skipping CA from external CA because it is expired %s, %s", cert.Issuer, cert.Subject)
}
}
return externalCAList
}
func (r *Reconciler) getRemotePublicCas() []*tls.Certificate {
obj, exists, err := r.stores.ConfigMapCache.GetByKey(controller.NamespacedKey(r.kv.Namespace, components.ExternalKubeVirtCAConfigMapName))
if err != nil {
log.Log.Reason(err).Errorf("failed to get external CA configmap")
return nil
} else if !exists {
log.Log.V(3).Infof("external CA configmap does not exist, returning empty list")
return nil
}
externalCaConfigMap := obj.(*corev1.ConfigMap)
externalCAData := externalCaConfigMap.Data[components.CABundleKey]
if externalCAData == "" {
log.Log.V(3).Infof("external CA configmap is empty, returning empty list")
return nil
}
log.Log.V(4).Infof("external CA data: %s", externalCAData)
certs, err := cert.ParseCertsPEM([]byte(externalCAData))
if err != nil {
log.Log.Infof("failed to parse external CA certificates: %v", err)
return nil
}
return getValidCerts(certs)
}
func (r *Reconciler) cleanupExternalCACerts(configMap *corev1.ConfigMap) error {
if configMap == nil {
return nil
}
version, imageRegistry, id := getTargetVersionRegistryID(r.kv)
injectOperatorMetadata(r.kv, &configMap.ObjectMeta, version, imageRegistry, id, true)
configMap.Data = map[string]string{components.CABundleKey: ""}
_, exists, _ := r.stores.ConfigMapCache.Get(configMap)
if !exists {
_, err := r.clientset.CoreV1().ConfigMaps(configMap.Namespace).Create(context.Background(), configMap, metav1.CreateOptions{})
if err != nil && !errors.IsAlreadyExists(err) {
return fmt.Errorf("unable to create configMap %+v: %v", configMap, err)
}
} else {
_, err := r.clientset.CoreV1().ConfigMaps(configMap.Namespace).Update(context.Background(), configMap, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("unable to update configMap %+v: %v", configMap, err)
}
}
return nil
}
func (r *Reconciler) createOrUpdateComponentsWithCertificates(queue workqueue.TypedRateLimitingInterface[string]) error {
caDuration := GetCADuration(r.kv.Spec.CertificateRotationStrategy.SelfSigned)
caExportDuration := GetCADuration(r.kv.Spec.CertificateRotationStrategy.SelfSigned)
caRenewBefore := GetCARenewBefore(r.kv.Spec.CertificateRotationStrategy.SelfSigned)
certDuration := GetCertDuration(r.kv.Spec.CertificateRotationStrategy.SelfSigned)
certRenewBefore := GetCertRenewBefore(r.kv.Spec.CertificateRotationStrategy.SelfSigned)
caExportRenewBefore := GetCertRenewBefore(r.kv.Spec.CertificateRotationStrategy.SelfSigned)
// create/update CA Certificate secret
caCert, err := r.createOrUpdateCACertificateSecret(queue, components.KubeVirtCASecretName, caDuration, caRenewBefore)
if err != nil {
return err
}
// create/update export CA Certificate secret
caExportCert, err := r.createOrUpdateCACertificateSecret(queue, components.KubeVirtExportCASecretName, caExportDuration, caExportRenewBefore)
if err != nil {
return err
}
err = r.createExternalKubeVirtCAConfigMap(findRequiredCAConfigMap(components.ExternalKubeVirtCAConfigMapName, r.targetStrategy.ConfigMaps()))
if err != nil {
return err
}
log.Log.V(3).Info("reading external CA configmap")
externalCACerts := r.getRemotePublicCas()
log.Log.V(3).Infof("found %d external CA certificates", len(externalCACerts))
// create/update CA config map
caBundle, err := r.createOrUpdateKubeVirtCAConfigMap(queue, caCert, externalCACerts, caRenewBefore, findRequiredCAConfigMap(components.KubeVirtCASecretName, r.targetStrategy.ConfigMaps()))
if err != nil {
return err
}
err = r.cleanupExternalCACerts(findRequiredCAConfigMap(components.ExternalKubeVirtCAConfigMapName, r.targetStrategy.ConfigMaps()))
if err != nil {
return err
}
// create/update export CA config map
_, err = r.createOrUpdateKubeVirtCAConfigMap(queue, caExportCert, nil, caExportRenewBefore, findRequiredCAConfigMap(components.KubeVirtExportCASecretName, r.targetStrategy.ConfigMaps()))
if err != nil {
return err
}
// create/update ValidatingWebhookConfiguration
err = r.createOrUpdateValidatingWebhookConfigurations(caBundle)
if err != nil {
return err
}
// create/update MutatingWebhookConfiguration
err = r.createOrUpdateMutatingWebhookConfigurations(caBundle)
if err != nil {
return err
}
// create/update APIServices
err = r.createOrUpdateAPIServices(caBundle)
if err != nil {
return err
}
// create/update Routes
err = r.createOrUpdateRoutes(caBundle)
if err != nil {
return err
}
// create/update Certificate secrets
err = r.createOrUpdateCertificateSecrets(queue, caCert, certDuration, certRenewBefore, caRenewBefore)
if err != nil {
return err
}
return nil
}
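// shouldEnforceClusterIP reports whether the desired ClusterIP has to be enforced.
// An empty desired value means the operator accepts whatever IP the cluster
// allocated, so only a non-empty value that differs from the current one counts.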
func shouldEnforceClusterIP(desired, current string) bool {
if desired == "" {
return false
}
return desired != current
}
func getObjectMetaPatch(desired, current metav1.ObjectMeta) []patch.PatchOption {
modified := resourcemerge.BoolPtr(false)
existingCopy := current.DeepCopy()
resourcemerge.EnsureObjectMeta(modified, existingCopy, desired)
if *modified {
// labels and/or annotations were modified; add a patch
return createLabelsAndAnnotationsPatch(&desired)
}
return nil
}
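// hasImmutableFieldChanged reports whether the Service changed in a way that cannot
// be patched in place: the service type moved from or to ClusterIP, or a specific
// ClusterIP is requested that differs from the allocated one. The clusterIP field is
// immutable in Kubernetes, so such changes require deleting and recreating the
// Service.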
func hasImmutableFieldChanged(service, cachedService *corev1.Service) bool {
deleteAndReplace := false
typeSame := isServiceClusterIP(cachedService) && isServiceClusterIP(service)
if !typeSame || shouldEnforceClusterIP(service.Spec.ClusterIP, cachedService.Spec.ClusterIP) {
deleteAndReplace = true
}
return deleteAndReplace
}
func generateServicePatch(
cachedService *corev1.Service,
service *corev1.Service) ([]byte, error) {
patchOps := getObjectMetaPatch(service.ObjectMeta, cachedService.ObjectMeta)
// carry over values allocated by the cluster so empty fields do not show up as a spurious diff
service.Spec.ClusterIP = cachedService.Spec.ClusterIP
service.Spec.Type = cachedService.Spec.Type
if service.Spec.SessionAffinity == "" {
service.Spec.SessionAffinity = cachedService.Spec.SessionAffinity
}
// If the specs still differ, replace the whole spec
if !equality.Semantic.DeepEqual(cachedService.Spec, service.Spec) {
patchOps = append(patchOps, patch.WithReplace("/spec", service.Spec))
}
patchset := patch.New(patchOps...)
if patchset.IsEmpty() {
return nil, nil
}
return patchset.GeneratePayload()
}
func (r *Reconciler) createOrUpdateServiceAccount(sa *corev1.ServiceAccount) error {
core := r.clientset.CoreV1()
version, imageRegistry, id := getTargetVersionRegistryID(r.kv)
injectOperatorMetadata(r.kv, &sa.ObjectMeta, version, imageRegistry, id, true)
obj, exists, _ := r.stores.ServiceAccountCache.Get(sa)
if !exists {
// Create, since it does not exist yet
r.expectations.ServiceAccount.RaiseExpectations(r.kvKey, 1, 0)
_, err := core.ServiceAccounts(r.kv.Namespace).Create(context.Background(), sa, metav1.CreateOptions{})
if err != nil {
r.expectations.ServiceAccount.LowerExpectations(r.kvKey, 1, 0)
return fmt.Errorf("unable to create serviceaccount %+v: %v", sa, err)
}
log.Log.V(2).Infof("serviceaccount %v created", sa.GetName())
return nil
}
cachedSa := obj.(*corev1.ServiceAccount)
modified := resourcemerge.BoolPtr(false)
resourcemerge.EnsureObjectMeta(modified, &cachedSa.ObjectMeta, sa.ObjectMeta)
// there was no change to metadata
if !*modified {
// Up to date
log.Log.V(4).Infof("serviceaccount %v already exists and is up-to-date", sa.GetName())
return nil
}
// Patch Labels and Annotations
labelAnnotationPatch, err := patch.New(createLabelsAndAnnotationsPatch(&sa.ObjectMeta)...).GeneratePayload()
if err != nil {
return err
}
_, err = core.ServiceAccounts(r.kv.Namespace).Patch(context.Background(), sa.Name, types.JSONPatchType, labelAnnotationPatch, metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("unable to patch serviceaccount %+v: %v", sa, err)
}
log.Log.V(2).Infof("serviceaccount %v updated", sa.GetName())
return nil
}
func (r *Reconciler) createOrUpdateRbac() error {
version, imageRegistry, id := getTargetVersionRegistryID(r.kv)
// create/update ServiceAccounts
for _, sa := range r.targetStrategy.ServiceAccounts() {
if err := r.createOrUpdateServiceAccount(sa.DeepCopy()); err != nil {
return err
}
}
// create/update ClusterRoles
for _, cr := range r.targetStrategy.ClusterRoles() {
err := r.createOrUpdateClusterRole(cr, version, imageRegistry, id)
if err != nil {
return err
}
}
// create/update ClusterRoleBindings
for _, crb := range r.targetStrategy.ClusterRoleBindings() {
err := r.createOrUpdateClusterRoleBinding(crb, version, imageRegistry, id)
if err != nil {
return err
}
}
// create/update Roles
for _, role := range r.targetStrategy.Roles() {
err := r.createOrUpdateRole(role, version, imageRegistry, id)
if err != nil {
return err
}
}
// create/update RoleBindings
for _, rb := range r.targetStrategy.RoleBindings() {
err := r.createOrUpdateRoleBinding(rb, version, imageRegistry, id)
if err != nil {
return err
}
}
return nil
}
func findRequiredCAConfigMap(name string, configmaps []*corev1.ConfigMap) *corev1.ConfigMap {
for _, cm := range configmaps {
if cm.Name != name {
continue
}
return cm.DeepCopy()
}
return nil
}
func buildCertMap(certs []*x509.Certificate) map[string]*x509.Certificate {
certMap := make(map[string]*x509.Certificate)
for _, cert := range certs {
hash := sha256.Sum256(cert.Raw)
hashString := hex.EncodeToString(hash[:])
certMap[hashString] = cert
}
return certMap
}
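// shouldUpdateBundle merges the current CA certificate into the existing bundle,
// appends any external CA certificates not yet contained in it (deduplicated by
// SHA-256 hash), stores the result in required.Data, and reports whether that differs
// from the existing configmap data. While the bundle still holds more than one
// certificate, the key is requeued so the old CA gets dropped after the overlap
// interval.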
func shouldUpdateBundle(required, existing *corev1.ConfigMap, key string, queue workqueue.TypedRateLimitingInterface[string], caCert *tls.Certificate, externalCACerts []*tls.Certificate, overlapInterval *metav1.Duration) (bool, error) {
bundle, certCount, err := components.MergeCABundle(caCert, []byte(existing.Data[components.CABundleKey]), overlapInterval.Duration)
if err != nil {
// the only error that can be returned from MergeCABundle is when the CA bundle
// cannot be parsed. If we cannot parse it, we should update it.
return true, err
}
// ensure that we remove the old CA after the overlap period
if certCount > 1 {
queue.AddAfter(key, overlapInterval.Duration)
}
bundleCerts, err := cert.ParseCertsPEM(bundle)
if err != nil {
return true, err
}
bundleCertMap := buildCertMap(bundleCerts)
bundleString := string(bundle)
for _, externalCACert := range externalCACerts {
pem := string(cert.EncodeCertPEM(externalCACert.Leaf))
hash := sha256.Sum256(externalCACert.Leaf.Raw)
hashString := hex.EncodeToString(hash[:])
if _, ok := bundleCertMap[hashString]; ok {
continue
}
bundleCertMap[hashString] = externalCACert.Leaf
bundleString += pem
}
required.Data = map[string]string{components.CABundleKey: bundleString}
return !equality.Semantic.DeepEqual(required.Data, existing.Data), nil
}
func (r *Reconciler) createExternalKubeVirtCAConfigMap(configMap *corev1.ConfigMap) error {
if configMap == nil {
log.Log.V(2).Infof("cannot create external CA configmap because it is nil")
return nil
}
_, exists, _ := r.stores.ConfigMapCache.Get(configMap)
if !exists {
log.Log.V(4).Infof("checking external ca config map %v", configMap.Name)
version, imageRegistry, id := getTargetVersionRegistryID(r.kv)
injectOperatorMetadata(r.kv, &configMap.ObjectMeta, version, imageRegistry, id, true)
configMap.Data = map[string]string{components.CABundleKey: ""}
_, err := r.clientset.CoreV1().ConfigMaps(configMap.Namespace).Create(context.Background(), configMap, metav1.CreateOptions{})
if err != nil && !errors.IsAlreadyExists(err) {
return fmt.Errorf("unable to create configMap %+v: %v", configMap, err)
}
}
return nil
}
func (r *Reconciler) createOrUpdateKubeVirtCAConfigMap(queue workqueue.TypedRateLimitingInterface[string], caCert *tls.Certificate, externalCACerts []*tls.Certificate, overlapInterval *metav1.Duration, configMap *corev1.ConfigMap) (caBundle []byte, err error) {
if configMap == nil {
return nil, nil
}
log.DefaultLogger().V(4).Infof("checking ca config map %v", configMap.Name)
version, imageRegistry, id := getTargetVersionRegistryID(r.kv)
injectOperatorMetadata(r.kv, &configMap.ObjectMeta, version, imageRegistry, id, true)
data := ""
obj, exists, _ := r.stores.ConfigMapCache.Get(configMap)
if !exists {
for _, externalCert := range externalCACerts {
data = data + string(cert.EncodeCertPEM(externalCert.Leaf))
}
data = data + string(cert.EncodeCertPEM(caCert.Leaf))
configMap.Data = map[string]string{components.CABundleKey: data}
r.expectations.ConfigMap.RaiseExpectations(r.kvKey, 1, 0)
_, err := r.clientset.CoreV1().ConfigMaps(configMap.Namespace).Create(context.Background(), configMap, metav1.CreateOptions{})
if err != nil {
r.expectations.ConfigMap.LowerExpectations(r.kvKey, 1, 0)
return nil, fmt.Errorf("unable to create configMap %+v: %v", configMap, err)
}
return []byte(configMap.Data[components.CABundleKey]), nil
}
existing := obj.(*corev1.ConfigMap)
updateBundle, err := shouldUpdateBundle(configMap, existing, r.kvKey, queue, caCert, externalCACerts, overlapInterval)
if err != nil {
if !updateBundle {
return nil, err
}
data = data + string(cert.EncodeCertPEM(caCert.Leaf))
configMap.Data = map[string]string{components.CABundleKey: data}
log.Log.Reason(err).V(2).Infof("There was an error validating the CA bundle stored in configmap %s. We are updating the bundle.", configMap.GetName())
}
modified := resourcemerge.BoolPtr(false)
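// EnsureObjectMeta runs against a throwaway deep copy: only the modified flag is of
// interest here, the mutated copy itself is discarded.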
resourcemerge.EnsureObjectMeta(modified, &existing.DeepCopy().ObjectMeta, configMap.ObjectMeta)
if !*modified && !updateBundle {
log.Log.V(4).Infof("configMap %v is up-to-date", configMap.GetName())
return []byte(configMap.Data[components.CABundleKey]), nil
}
patchBytes, err := createConfigMapPatch(configMap)
if err != nil {
return nil, err
}
_, err = r.clientset.CoreV1().ConfigMaps(configMap.Namespace).Patch(context.Background(), configMap.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return nil, fmt.Errorf("unable to patch configMap %+v: %v", configMap, err)
}
log.Log.V(2).Infof("configMap %v updated", configMap.GetName())
return []byte(configMap.Data[components.CABundleKey]), nil
}
func createConfigMapPatch(configMap *corev1.ConfigMap) ([]byte, error) {
// Add Labels and Annotations Patches
ops := createLabelsAndAnnotationsPatch(&configMap.ObjectMeta)
ops = append(ops, patch.WithReplace("/data", configMap.Data))
return patch.New(ops...).GeneratePayload()
}
func (r *Reconciler) createOrUpdateCACertificateSecret(queue workqueue.TypedRateLimitingInterface[string], name string, duration *metav1.Duration, renewBefore *metav1.Duration) (caCert *tls.Certificate, err error) {
for _, secret := range r.targetStrategy.CertificateSecrets() {
// Only work on the CA secret matching the given name
if secret.Name != name {
continue
}
cert, err := r.createOrUpdateCertificateSecret(queue, nil, secret, duration, renewBefore, nil)
if err != nil {
return nil, err
}
caCert = cert
}
return caCert, nil
}
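// updateSynchronizationAddress publishes the synchronization controller endpoints in
// the KubeVirt status, or clears them when the DecentralizedLiveMigration feature
// gate is disabled. It resolves the current leader from the coordination lease,
// prefers the migration network IPs from the pod's network annotations, falls back
// to the plain pod IPs, and combines them with the configured (or default)
// synchronization port.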
func (r *Reconciler) updateSynchronizationAddress() (err error) {
if !r.isFeatureGateEnabled(featuregate.DecentralizedLiveMigration) {
r.kv.Status.SynchronizationAddresses = nil
return nil
}
// Find the lease associated with the virt-synchronization controller
lease, err := r.clientset.CoordinationV1().Leases(r.kv.Namespace).Get(context.Background(), components.VirtSynchronizationControllerName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return nil
}
return err
}
podName := ""
if lease.Spec.HolderIdentity != nil {
podName = *lease.Spec.HolderIdentity
}
if podName == "" {
return nil
}
pod, err := r.clientset.CoreV1().Pods(r.kv.Namespace).Get(context.Background(), podName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return nil
}
return err
}
// Check for the migration network address in the pod annotations.
ips := r.getIpsFromAnnotations(pod)
if len(ips) == 0 && pod.Status.PodIPs != nil {
// No annotations found; fall back to the pod IP addresses
ips = make([]string, len(pod.Status.PodIPs))
for i, podIP := range pod.Status.PodIPs {
ips[i] = podIP.IP
}
}
if len(ips) == 0 {
return nil
}
port := util.DefaultSynchronizationPort
if r.kv.Spec.SynchronizationPort != "" {
p, err := strconv.Atoi(r.kv.Spec.SynchronizationPort)
if err != nil {
return err
}
port = int32(p)
}
addresses := make([]string, len(ips))
for i, ip := range ips {
addresses[i] = fmt.Sprintf("%s:%d", ip, port)
}
r.kv.Status.SynchronizationAddresses = addresses
return nil
}
func (r *Reconciler) getIpsFromAnnotations(pod *corev1.Pod) []string {
networkStatuses := multus.NetworkStatusesFromPod(pod)
for _, networkStatus := range networkStatuses {
if networkStatus.Interface == v1.MigrationInterfaceName {
if len(networkStatus.IPs) == 0 {
break
}
log.Log.Object(pod).V(4).Infof("found migration network ip addresses %v", networkStatus.IPs)
return networkStatus.IPs
}
}
log.Log.Object(pod).V(4).Infof("didn't find migration network ip in annotations %v", pod.Annotations)
return nil
}
package apply
import (
"context"
"fmt"
"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
)
func getSubresourcesForVersion(crd *extv1.CustomResourceDefinition, version string) *extv1.CustomResourceSubresources {
for _, v := range crd.Spec.Versions {
if version == v.Name {
return v.Subresources
}
}
return nil
}
func needsSubresourceStatusEnable(crd, cachedCrd *extv1.CustomResourceDefinition) bool {
for _, version := range crd.Spec.Versions {
if version.Subresources != nil && version.Subresources.Status != nil {
subresource := getSubresourcesForVersion(cachedCrd, version.Name)
if subresource == nil || subresource.Status == nil {
return true
}
}
}
return false
}
func needsSubresourceStatusDisable(crdTargetVersion *extv1.CustomResourceDefinitionVersion, cachedCrd *extv1.CustomResourceDefinition) bool {
// subresource support needs to be introduced carefully after the control plane roll-over
// to avoid creating zombie entities which don't get processed due to ignored status updates
cachedSubresource := getSubresourcesForVersion(cachedCrd, crdTargetVersion.Name)
return (cachedSubresource == nil || cachedSubresource.Status == nil) &&
(crdTargetVersion.Subresources != nil && crdTargetVersion.Subresources.Status != nil)
}
func patchCRD(client clientset.Interface, crd *extv1.CustomResourceDefinition, ops []patch.PatchOption) (*extv1.CustomResourceDefinition, error) {
name := crd.GetName()
ops = append(ops, patch.WithReplace("/spec", crd.Spec))
patchBytes, err := patch.New(ops...).GeneratePayload()
if err != nil {
return nil, err
}
crd, err = client.ApiextensionsV1().CustomResourceDefinitions().Patch(context.Background(), name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return nil, fmt.Errorf("unable to patch crd %+v: %v", crd, err)
}
log.Log.V(2).Infof("crd %v updated", name)
return crd, nil
}
func (r *Reconciler) createOrUpdateCrds() error {
for _, crd := range r.targetStrategy.CRDs() {
err := r.createOrUpdateCrd(crd)
if err != nil {
return err
}
}
return nil
}
func (r *Reconciler) createOrUpdateCrd(crd *extv1.CustomResourceDefinition) error {
client := r.clientset.ExtensionsClient()
version, imageRegistry, id := getTargetVersionRegistryID(r.kv)
var cachedCrd *extv1.CustomResourceDefinition
crd = crd.DeepCopy()
injectOperatorMetadata(r.kv, &crd.ObjectMeta, version, imageRegistry, id, true)
obj, exists, _ := r.stores.OperatorCrdCache.Get(crd)
if !exists {
// Create, since it does not exist yet
r.expectations.OperatorCrd.RaiseExpectations(r.kvKey, 1, 0)
createdCRD, err := client.ApiextensionsV1().CustomResourceDefinitions().Create(context.Background(), crd, metav1.CreateOptions{})
if err != nil {
r.expectations.OperatorCrd.LowerExpectations(r.kvKey, 1, 0)
return fmt.Errorf("unable to create crd %+v: %v", crd, err)
}
SetGeneration(&r.kv.Status.Generations, createdCRD)
log.Log.V(2).Infof("crd %v created", crd.GetName())
return nil
}
cachedCrd = obj.(*extv1.CustomResourceDefinition)
modified := resourcemerge.BoolPtr(false)
expectedGeneration := GetExpectedGeneration(crd, r.kv.Status.Generations)
resourcemerge.EnsureObjectMeta(modified, &cachedCrd.ObjectMeta, crd.ObjectMeta)
// there was no change to metadata and the generation matches
if !*modified && cachedCrd.GetGeneration() == expectedGeneration {
log.Log.V(4).Infof("crd %v is up-to-date", crd.GetName())
return nil
}
// Patch if old version
for i := range crd.Spec.Versions {
if needsSubresourceStatusDisable(&crd.Spec.Versions[i], cachedCrd) {
crd.Spec.Versions[i].Subresources.Status = nil
}
}
// Add Labels and Annotations Patches
crd, err := patchCRD(client, crd, createLabelsAndAnnotationsPatch(&crd.ObjectMeta))
if err != nil {
return err
}
SetGeneration(&r.kv.Status.Generations, crd)
return nil
}
func (r *Reconciler) rolloutNonCompatibleCRDChanges() error {
for _, crd := range r.targetStrategy.CRDs() {
err := r.rolloutNonCompatibleCRDChange(crd)
if err != nil {
return err
}
}
return nil
}
func (r *Reconciler) rolloutNonCompatibleCRDChange(crd *extv1.CustomResourceDefinition) error {
client := r.clientset.ExtensionsClient()
version, imageRegistry, id := getTargetVersionRegistryID(r.kv)
var cachedCrd *extv1.CustomResourceDefinition
crd = crd.DeepCopy()
obj, exists, err := r.stores.OperatorCrdCache.Get(crd)
if !exists {
return err
}
cachedCrd = obj.(*extv1.CustomResourceDefinition)
injectOperatorMetadata(r.kv, &crd.ObjectMeta, version, imageRegistry, id, true)
if objectMatchesVersion(&cachedCrd.ObjectMeta, version, imageRegistry, id, r.kv.GetGeneration()) {
// Patch if the subresource is not enabled in the currently deployed version
if !needsSubresourceStatusEnable(crd, cachedCrd) {
return nil
}
// enable the status subresources now, in case they were disabled before
if _, err := patchCRD(client, crd, []patch.PatchOption{}); err != nil {
return err
}
return nil
}
log.Log.V(4).Infof("crd %v is up-to-date", crd.GetName())
return nil
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
"context"
"fmt"
"strings"
routev1 "github.com/openshift/api/route/v1"
secv1 "github.com/openshift/api/security/v1"
promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
rbacv1 "k8s.io/api/rbac/v1"
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
apiregv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/controller"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/install"
"kubevirt.io/kubevirt/pkg/virt-operator/util"
)
const (
castFailedFmt = "Cast failed! obj: %+v"
deleteFailedFmt = "Failed to delete %s: %v"
)
func deleteDummyWebhookValidators(kv *v1.KubeVirt,
clientset kubecli.KubevirtClient,
stores util.Stores,
expectations *util.Expectations) error {
kvkey, err := controller.KeyFunc(kv)
if err != nil {
return err
}
gracePeriod := int64(0)
deleteOptions := metav1.DeleteOptions{
GracePeriodSeconds: &gracePeriod,
}
objects := stores.ValidationWebhookCache.List()
for _, obj := range objects {
if webhook, ok := obj.(*admissionregistrationv1.ValidatingWebhookConfiguration); ok {
if !strings.HasPrefix(webhook.Name, "virt-operator-tmp-webhook") {
continue
}
if webhook.DeletionTimestamp != nil {
continue
}
if key, err := controller.KeyFunc(webhook); err == nil {
expectations.ValidationWebhook.AddExpectedDeletion(kvkey, key)
err = clientset.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.Background(), webhook.Name, deleteOptions)
if err != nil {
expectations.ValidationWebhook.DeletionObserved(kvkey, key)
return fmt.Errorf("unable to delete validation webhook: %v", err)
}
log.Log.V(2).Infof("Temporary blocking validation webhook %s deleted", webhook.Name)
}
}
}
return nil
}
func DeleteAll(kv *v1.KubeVirt,
stores util.Stores,
clientset kubecli.KubevirtClient,
aggregatorclient install.APIServiceInterface,
expectations *util.Expectations) error {
kvkey, err := controller.KeyFunc(kv)
if err != nil {
return err
}
gracePeriod := int64(0)
deleteOptions := metav1.DeleteOptions{
GracePeriodSeconds: &gracePeriod,
}
// first delete CRDs only
err = crdHandleDeletion(kvkey, stores, clientset, expectations)
if err != nil {
return err
}
if !util.IsStoreEmpty(stores.OperatorCrdCache) {
// wait until CRDs are gone
return nil
}
// delete daemonsets
objects := stores.DaemonSetCache.List()
for _, obj := range objects {
if ds, ok := obj.(*appsv1.DaemonSet); ok && ds.DeletionTimestamp == nil {
if key, err := controller.KeyFunc(ds); err == nil {
expectations.DaemonSet.AddExpectedDeletion(kvkey, key)
err := clientset.AppsV1().DaemonSets(ds.Namespace).Delete(context.Background(), ds.Name, deleteOptions)
if err != nil {
expectations.DaemonSet.DeletionObserved(kvkey, key)
log.Log.Errorf(deleteFailedFmt, ds.Name, err)
return err
}
}
} else if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
}
// delete podDisruptionBudgets
objects = stores.PodDisruptionBudgetCache.List()
for _, obj := range objects {
if pdb, ok := obj.(*policyv1.PodDisruptionBudget); ok && pdb.DeletionTimestamp == nil {
if key, err := controller.KeyFunc(pdb); err == nil {
pdbClient := clientset.PolicyV1().PodDisruptionBudgets(pdb.Namespace)
expectations.PodDisruptionBudget.AddExpectedDeletion(kvkey, key)
err = pdbClient.Delete(context.Background(), pdb.Name, metav1.DeleteOptions{})
if err != nil {
expectations.PodDisruptionBudget.DeletionObserved(kvkey, key)
log.Log.Errorf(deleteFailedFmt, pdb.Name, err)
return err
}
}
} else if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
}
// delete deployments
objects = stores.DeploymentCache.List()
for _, obj := range objects {
if depl, ok := obj.(*appsv1.Deployment); ok && depl.DeletionTimestamp == nil {
if key, err := controller.KeyFunc(depl); err == nil {
expectations.Deployment.AddExpectedDeletion(kvkey, key)
err = clientset.AppsV1().Deployments(depl.Namespace).Delete(context.Background(), depl.Name, deleteOptions)
if err != nil {
expectations.Deployment.DeletionObserved(kvkey, key)
log.Log.Errorf(deleteFailedFmt, depl.Name, err)
return err
}
}
} else if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
}
// delete validatingwebhooks
objects = stores.ValidationWebhookCache.List()
for _, obj := range objects {
if webhookConfiguration, ok := obj.(*admissionregistrationv1.ValidatingWebhookConfiguration); ok && webhookConfiguration.DeletionTimestamp == nil {
if key, err := controller.KeyFunc(webhookConfiguration); err == nil {
expectations.ValidationWebhook.AddExpectedDeletion(kvkey, key)
err := clientset.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.Background(), webhookConfiguration.Name, deleteOptions)
if err != nil {
expectations.ValidationWebhook.DeletionObserved(kvkey, key)
log.Log.Errorf("Failed to delete validatingwebhook %+v: %v", webhookConfiguration, err)
return err
}
}
} else if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
}
// delete mutatingwebhooks
objects = stores.MutatingWebhookCache.List()
for _, obj := range objects {
if webhookConfiguration, ok := obj.(*admissionregistrationv1.MutatingWebhookConfiguration); ok && webhookConfiguration.DeletionTimestamp == nil {
if key, err := controller.KeyFunc(webhookConfiguration); err == nil {
expectations.MutatingWebhook.AddExpectedDeletion(kvkey, key)
err := clientset.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.Background(), webhookConfiguration.Name, deleteOptions)
if err != nil {
expectations.MutatingWebhook.DeletionObserved(kvkey, key)
log.Log.Errorf("Failed to delete mutatingwebhook %+v: %v", webhookConfiguration, err)
return err
}
}
} else if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
}
// delete apiservices
objects = stores.APIServiceCache.List()
for _, obj := range objects {
if apiservice, ok := obj.(*apiregv1.APIService); ok && apiservice.DeletionTimestamp == nil {
if key, err := controller.KeyFunc(apiservice); err == nil {
expectations.APIService.AddExpectedDeletion(kvkey, key)
err := aggregatorclient.Delete(context.Background(), apiservice.Name, deleteOptions)
if err != nil {
expectations.APIService.DeletionObserved(kvkey, key)
log.Log.Errorf("Failed to delete apiservice %+v: %v", apiservice, err)
return err
}
}
} else if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
}
// delete services
objects = stores.ServiceCache.List()
for _, obj := range objects {
if svc, ok := obj.(*corev1.Service); ok && svc.DeletionTimestamp == nil {
if key, err := controller.KeyFunc(svc); err == nil {
expectations.Service.AddExpectedDeletion(kvkey, key)
err := clientset.CoreV1().Services(svc.Namespace).Delete(context.Background(), svc.Name, deleteOptions)
if err != nil {
expectations.Service.DeletionObserved(kvkey, key)
log.Log.Errorf("Failed to delete service %+v: %v", svc, err)
return err
}
}
} else if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
}
// delete serviceMonitor
prometheusClient := clientset.PrometheusClient()
objects = stores.ServiceMonitorCache.List()
for _, obj := range objects {
if serviceMonitor, ok := obj.(*promv1.ServiceMonitor); ok && serviceMonitor.DeletionTimestamp == nil {
if key, err := controller.KeyFunc(serviceMonitor); err == nil {
expectations.ServiceMonitor.AddExpectedDeletion(kvkey, key)
err := prometheusClient.MonitoringV1().ServiceMonitors(serviceMonitor.Namespace).Delete(context.Background(), serviceMonitor.Name, deleteOptions)
if err != nil {
expectations.ServiceMonitor.DeletionObserved(kvkey, key)
log.Log.Errorf("Failed to delete serviceMonitor %+v: %v", serviceMonitor, err)
return err
}
expectations.ServiceMonitor.DeletionObserved(kvkey, key)
}
} else if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
}
// delete PrometheusRules
objects = stores.PrometheusRuleCache.List()
for _, obj := range objects {
if prometheusRule, ok := obj.(*promv1.PrometheusRule); ok && prometheusRule.DeletionTimestamp == nil {
if key, err := controller.KeyFunc(prometheusRule); err == nil {
expectations.PrometheusRule.AddExpectedDeletion(kvkey, key)
err := prometheusClient.MonitoringV1().PrometheusRules(prometheusRule.Namespace).Delete(context.Background(), prometheusRule.Name, deleteOptions)
if err != nil {
log.Log.Errorf("Failed to delete prometheusRule %+v: %v", prometheusRule, err)
expectations.PrometheusRule.DeletionObserved(kvkey, key)
return err
}
}
} else if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
}
// delete RBAC
objects = stores.ClusterRoleBindingCache.List()
for _, obj := range objects {
if crb, ok := obj.(*rbacv1.ClusterRoleBinding); ok && crb.DeletionTimestamp == nil {
if key, err := controller.KeyFunc(crb); err == nil {
expectations.ClusterRoleBinding.AddExpectedDeletion(kvkey, key)
err := clientset.RbacV1().ClusterRoleBindings().Delete(context.Background(), crb.Name, deleteOptions)
if err != nil {
expectations.ClusterRoleBinding.DeletionObserved(kvkey, key)
log.Log.Errorf("Failed to delete crb %+v: %v", crb, err)
return err
}
}
} else if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
}
objects = stores.ClusterRoleCache.List()
for _, obj := range objects {
if cr, ok := obj.(*rbacv1.ClusterRole); ok && cr.DeletionTimestamp == nil {
if key, err := controller.KeyFunc(cr); err == nil {
expectations.ClusterRole.AddExpectedDeletion(kvkey, key)
err := clientset.RbacV1().ClusterRoles().Delete(context.Background(), cr.Name, deleteOptions)
if err != nil {
expectations.ClusterRole.DeletionObserved(kvkey, key)
log.Log.Errorf("Failed to delete cr %+v: %v", cr, err)
return err
}
}
} else if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
}
objects = stores.RoleBindingCache.List()
for _, obj := range objects {
if rb, ok := obj.(*rbacv1.RoleBinding); ok && rb.DeletionTimestamp == nil {
if key, err := controller.KeyFunc(rb); err == nil {
expectations.RoleBinding.AddExpectedDeletion(kvkey, key)
err := clientset.RbacV1().RoleBindings(kv.Namespace).Delete(context.Background(), rb.Name, deleteOptions)
if err != nil {
expectations.RoleBinding.DeletionObserved(kvkey, key)
log.Log.Errorf("Failed to delete rb %+v: %v", rb, err)
return err
}
}
} else if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
}
objects = stores.RoleCache.List()
for _, obj := range objects {
if role, ok := obj.(*rbacv1.Role); ok && role.DeletionTimestamp == nil {
if key, err := controller.KeyFunc(role); err == nil {
expectations.Role.AddExpectedDeletion(kvkey, key)
err := clientset.RbacV1().Roles(kv.Namespace).Delete(context.Background(), role.Name, deleteOptions)
if err != nil {
expectations.Role.DeletionObserved(kvkey, key)
log.Log.Errorf("Failed to delete role %+v: %v", role, err)
return err
}
}
} else if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
}
objects = stores.ServiceAccountCache.List()
for _, obj := range objects {
if sa, ok := obj.(*corev1.ServiceAccount); ok && sa.DeletionTimestamp == nil {
if key, err := controller.KeyFunc(sa); err == nil {
expectations.ServiceAccount.AddExpectedDeletion(kvkey, key)
err := clientset.CoreV1().ServiceAccounts(kv.Namespace).Delete(context.Background(), sa.Name, deleteOptions)
if err != nil {
expectations.ServiceAccount.DeletionObserved(kvkey, key)
log.Log.Errorf("Failed to delete serviceaccount %+v: %v", sa, err)
return err
}
}
} else if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
}
objects = stores.SecretCache.List()
for _, obj := range objects {
if secret, ok := obj.(*corev1.Secret); ok && secret.DeletionTimestamp == nil {
if key, err := controller.KeyFunc(secret); err == nil {
expectations.Secrets.AddExpectedDeletion(kvkey, key)
err := clientset.CoreV1().Secrets(kv.Namespace).Delete(context.Background(), secret.Name, deleteOptions)
if err != nil {
expectations.Secrets.DeletionObserved(kvkey, key)
log.Log.Errorf("Failed to delete secret %+v: %v", secret, err)
return err
}
}
} else if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
}
objects = stores.ConfigMapCache.List()
for _, obj := range objects {
if configMap, ok := obj.(*corev1.ConfigMap); ok && configMap.DeletionTimestamp == nil {
if key, err := controller.KeyFunc(configMap); err == nil {
expectations.ConfigMap.AddExpectedDeletion(kvkey, key)
err := clientset.CoreV1().ConfigMaps(kv.Namespace).Delete(context.Background(), configMap.Name, deleteOptions)
if err != nil {
expectations.ConfigMap.DeletionObserved(kvkey, key)
log.Log.Errorf("Failed to delete configMap %+v: %v", configMap, err)
return err
}
}
} else if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
}
scc := clientset.SecClient()
objects = stores.SCCCache.List()
for _, obj := range objects {
if s, ok := obj.(*secv1.SecurityContextConstraints); ok && s.DeletionTimestamp == nil {
// the informer watches all SCC objects and cannot be scoped; only delete SCCs managed by the operator
if !util.IsManagedByOperator(s.GetLabels()) {
continue
}
if key, err := controller.KeyFunc(s); err == nil {
expectations.SCC.AddExpectedDeletion(kvkey, key)
err := scc.SecurityContextConstraints().Delete(context.Background(), s.Name, deleteOptions)
if err != nil {
expectations.SCC.DeletionObserved(kvkey, key)
log.Log.Errorf("Failed to delete SecurityContextConstraints %+v: %v", s, err)
return err
}
}
} else if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
}
objects = stores.RouteCache.List()
for _, obj := range objects {
if route, ok := obj.(*routev1.Route); ok && route.DeletionTimestamp == nil {
if key, err := controller.KeyFunc(route); err == nil {
expectations.Route.AddExpectedDeletion(kvkey, key)
err := clientset.RouteClient().Routes(kv.Namespace).Delete(context.Background(), route.Name, deleteOptions)
if err != nil {
expectations.Route.DeletionObserved(kvkey, key)
log.Log.Errorf("Failed to delete route %+v: %v", route, err)
return err
}
}
} else if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
}
objects = stores.ValidatingAdmissionPolicyBindingCache.List()
for _, obj := range objects {
if validatingAdmissionPolicyBinding, ok := obj.(*admissionregistrationv1.ValidatingAdmissionPolicyBinding); ok && validatingAdmissionPolicyBinding.DeletionTimestamp == nil {
if key, err := controller.KeyFunc(validatingAdmissionPolicyBinding); err == nil {
expectations.ValidatingAdmissionPolicyBinding.AddExpectedDeletion(kvkey, key)
err := clientset.AdmissionregistrationV1().ValidatingAdmissionPolicyBindings().Delete(context.Background(), validatingAdmissionPolicyBinding.Name, deleteOptions)
if err != nil {
expectations.ValidatingAdmissionPolicyBinding.DeletionObserved(kvkey, key)
log.Log.Errorf("Failed to delete validatingAdmissionPolicyBinding %+v: %v", validatingAdmissionPolicyBinding, err)
return err
}
}
} else if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
}
objects = stores.ValidatingAdmissionPolicyCache.List()
for _, obj := range objects {
if validatingAdmissionPolicy, ok := obj.(*admissionregistrationv1.ValidatingAdmissionPolicy); ok && validatingAdmissionPolicy.DeletionTimestamp == nil {
if key, err := controller.KeyFunc(validatingAdmissionPolicy); err == nil {
expectations.ValidatingAdmissionPolicy.AddExpectedDeletion(kvkey, key)
err := clientset.AdmissionregistrationV1().ValidatingAdmissionPolicies().Delete(context.Background(), validatingAdmissionPolicy.Name, deleteOptions)
if err != nil {
expectations.ValidatingAdmissionPolicy.DeletionObserved(kvkey, key)
log.Log.Errorf("Failed to delete validatingAdmissionPolicy %+v: %v", validatingAdmissionPolicy, err)
return err
}
}
} else if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
}
if err = deleteKubeVirtLabelsFromNodes(clientset); err != nil {
return err
}
err = deleteDummyWebhookValidators(kv, clientset, stores, expectations)
if err != nil {
return err
}
return nil
}
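// deleteKubeVirtLabelsFromNodes strips all "kubevirt.io/"-prefixed labels from the
// schedulable nodes with one JSON patch per node. Label keys are escaped as JSON
// pointers per RFC 6901, so a key like "kubevirt.io/schedulable" (for example)
// becomes the path "/metadata/labels/kubevirt.io~1schedulable".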
func deleteKubeVirtLabelsFromNodes(clientset kubecli.KubevirtClient) error {
nodeList, err := clientset.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{LabelSelector: v1.NodeSchedulable})
if err != nil {
return fmt.Errorf("failed to list nodes: %v", err)
}
for _, node := range nodeList.Items {
labels := node.GetLabels()
if labels == nil {
continue
}
patchSet := patch.New()
for labelkey := range labels {
if strings.HasPrefix(labelkey, "kubevirt.io/") {
patchSet.AddOption(patch.WithRemove(fmt.Sprintf("/metadata/labels/%s", patch.EscapeJSONPointer(labelkey))))
}
}
if patchSet.IsEmpty() {
continue
}
payload, err := patchSet.GeneratePayload()
if err != nil {
return fmt.Errorf("failed to generate patch payload: %v", err)
}
if _, err = clientset.CoreV1().Nodes().Patch(context.Background(), node.Name, types.JSONPatchType, payload, metav1.PatchOptions{}); err != nil {
return fmt.Errorf("failed to update labels for node %s: %v", node.Name, err)
}
log.Log.Infof("removed kubevirt labels from node %s", node.Name)
}
return nil
}
func crdInstanceDeletionCompleted(crd *extv1.CustomResourceDefinition) bool {
// Below is an example of what is being looked for here.
// The CRD will have this condition once a CRD which is being
// deleted has all instances removed related to this CRD.
//
// message: removed all instances
// reason: InstanceDeletionCompleted
// status: "False"
// type: Terminating
if crd.DeletionTimestamp == nil {
return false
}
for _, condition := range crd.Status.Conditions {
if condition.Type == extv1.Terminating &&
condition.Status == extv1.ConditionFalse &&
condition.Reason == "InstanceDeletionCompleted" {
return true
}
}
return false
}
func crdFilterNeedFinalizerAdded(crds []*extv1.CustomResourceDefinition) []*extv1.CustomResourceDefinition {
filtered := []*extv1.CustomResourceDefinition{}
for _, crd := range crds {
if crd.DeletionTimestamp == nil && !controller.HasFinalizer(crd, v1.VirtOperatorComponentFinalizer) {
filtered = append(filtered, crd)
}
}
return filtered
}
func crdFilterNeedDeletion(crds []*extv1.CustomResourceDefinition) []*extv1.CustomResourceDefinition {
filtered := []*extv1.CustomResourceDefinition{}
for _, crd := range crds {
if crd.DeletionTimestamp == nil {
filtered = append(filtered, crd)
}
}
return filtered
}
func crdFilterNeedFinalizerRemoved(crds []*extv1.CustomResourceDefinition) []*extv1.CustomResourceDefinition {
filtered := []*extv1.CustomResourceDefinition{}
for _, crd := range crds {
if !crdInstanceDeletionCompleted(crd) {
// All CRDs must have all of their CRs removed before any CRD finalizer can be removed
return []*extv1.CustomResourceDefinition{}
} else if controller.HasFinalizer(crd, v1.VirtOperatorComponentFinalizer) {
filtered = append(filtered, crd)
}
}
return filtered
}
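// crdHandleDeletion drives CRD removal in three phases: a component finalizer is
// added to every live CRD so their custom resources can drain first, the CRDs are
// then deleted, and the finalizers are removed only once every CRD reports
// InstanceDeletionCompleted.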
func crdHandleDeletion(kvkey string,
stores util.Stores,
clientset kubecli.KubevirtClient,
expectations *util.Expectations) error {
ext := clientset.ExtensionsClient()
objects := stores.OperatorCrdCache.List()
finalizerPath := "/metadata/finalizers"
crds := []*extv1.CustomResourceDefinition{}
for _, obj := range objects {
crd, ok := obj.(*extv1.CustomResourceDefinition)
if !ok {
log.Log.Errorf(castFailedFmt, obj)
return nil
}
crds = append(crds, crd)
}
needFinalizerAdded := crdFilterNeedFinalizerAdded(crds)
needDeletion := crdFilterNeedDeletion(crds)
needFinalizerRemoved := crdFilterNeedFinalizerRemoved(crds)
for _, crd := range needFinalizerAdded {
crdCopy := crd.DeepCopy()
controller.AddFinalizer(crdCopy, v1.VirtOperatorComponentFinalizer)
payload, err := patch.New(patch.WithAdd(finalizerPath, crdCopy.Finalizers)).GeneratePayload()
if err != nil {
return fmt.Errorf("failed to generate patch payload: %v", err)
}
_, err = ext.ApiextensionsV1().CustomResourceDefinitions().Patch(context.Background(), crd.Name, types.JSONPatchType, payload, metav1.PatchOptions{})
if err != nil {
return err
}
}
for _, crd := range needDeletion {
key, err := controller.KeyFunc(crd)
if err != nil {
return err
}
expectations.OperatorCrd.AddExpectedDeletion(kvkey, key)
err = ext.ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), crd.Name, metav1.DeleteOptions{})
if err != nil {
expectations.OperatorCrd.DeletionObserved(kvkey, key)
log.Log.Errorf("Failed to delete crd %+v: %v", crd, err)
return err
}
}
for _, crd := range needFinalizerRemoved {
patchSet := patch.New()
if len(crd.Finalizers) > 1 {
crdCopy := crd.DeepCopy()
controller.RemoveFinalizer(crdCopy, v1.VirtOperatorComponentFinalizer)
patchSet.AddOption(
patch.WithTest(finalizerPath, crd.Finalizers),
patch.WithReplace(finalizerPath, crdCopy.Finalizers),
)
} else {
patchSet.AddOption(patch.WithRemove(finalizerPath))
}
payload, err := patchSet.GeneratePayload()
if err != nil {
return fmt.Errorf("failed to generate patch payload: %v", err)
}
_, err = ext.ApiextensionsV1().CustomResourceDefinitions().Patch(context.Background(), crd.Name, types.JSONPatchType, payload, metav1.PatchOptions{})
if err != nil {
return err
}
}
return nil
}
package apply
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
k6tv1 "kubevirt.io/api/core/v1"
appsv1 "k8s.io/api/apps/v1"
operatorsv1 "github.com/openshift/api/operator/v1"
"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
policyv1 "k8s.io/api/policy/v1"
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
func getGroupResource(required runtime.Object) (group string, resource string, err error) {
switch required.(type) {
case *extv1.CustomResourceDefinition:
// the API group is "apiextensions.k8s.io"; the "/v1" version suffix is not part of the group
group = "apiextensions.k8s.io"
resource = "customresourcedefinitions"
case *admissionregistrationv1.MutatingWebhookConfiguration:
group = "admissionregistration.k8s.io"
resource = "mutatingwebhookconfigurations"
case *admissionregistrationv1.ValidatingWebhookConfiguration:
group = "admissionregistration.k8s.io"
resource = "validatingwebhookconfigurations"
case *policyv1.PodDisruptionBudget:
// PodDisruptionBudgets belong to the "policy" API group, not "apps"
group = "policy"
resource = "poddisruptionbudgets"
case *appsv1.Deployment:
group = "apps"
resource = "deployments"
case *appsv1.DaemonSet:
group = "apps"
resource = "daemonsets"
default:
err = fmt.Errorf("resource type %T is not known", required)
return
}
return
}
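// GetExpectedGeneration looks up the generation last recorded for the given object in
// the KubeVirt status, keyed by group, resource, namespace and name. It returns -1
// when the object type is unknown or no generation has been recorded yet.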
func GetExpectedGeneration(required runtime.Object, previousGenerations []k6tv1.GenerationStatus) int64 {
group, resource, err := getGroupResource(required)
if err != nil {
return -1
}
operatorGenerations := toOperatorGenerations(previousGenerations)
meta := required.(v1.Object)
generation := resourcemerge.GenerationFor(operatorGenerations, schema.GroupResource{Group: group, Resource: resource}, meta.GetNamespace(), meta.GetName())
if generation == nil {
return -1
}
return generation.LastGeneration
}
func SetGeneration(generations *[]k6tv1.GenerationStatus, actual runtime.Object) {
if actual == nil {
return
}
group, resource, err := getGroupResource(actual)
if err != nil {
return
}
operatorGenerations := toOperatorGenerations(*generations)
meta := actual.(v1.Object)
resourcemerge.SetGeneration(&operatorGenerations, operatorsv1.GenerationStatus{
Group: group,
Resource: resource,
Namespace: meta.GetNamespace(),
Name: meta.GetName(),
LastGeneration: meta.GetGeneration(),
})
newGenerations := toAPIGenerations(operatorGenerations)
*generations = newGenerations
}
func toOperatorGeneration(generation k6tv1.GenerationStatus) operatorsv1.GenerationStatus {
return operatorsv1.GenerationStatus{
Group: generation.Group,
Resource: generation.Resource,
Namespace: generation.Namespace,
Name: generation.Name,
LastGeneration: generation.LastGeneration,
Hash: generation.Hash,
}
}
func toAPIGeneration(generation operatorsv1.GenerationStatus) k6tv1.GenerationStatus {
return k6tv1.GenerationStatus{
Group: generation.Group,
Resource: generation.Resource,
Namespace: generation.Namespace,
Name: generation.Name,
LastGeneration: generation.LastGeneration,
Hash: generation.Hash,
}
}
func toOperatorGenerations(generations []k6tv1.GenerationStatus) (operatorGenerations []operatorsv1.GenerationStatus) {
for _, generation := range generations {
operatorGenerations = append(operatorGenerations, toOperatorGeneration(generation))
}
return operatorGenerations
}
func toAPIGenerations(generations []operatorsv1.GenerationStatus) (apiGenerations []k6tv1.GenerationStatus) {
for _, generation := range generations {
apiGenerations = append(apiGenerations, toAPIGeneration(generation))
}
return apiGenerations
}
package apply
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
v1 "kubevirt.io/api/core/v1"
instancetypev1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/client-go/log"
)
func (r *Reconciler) createOrUpdateInstancetypes() error {
for _, instancetype := range r.targetStrategy.Instancetypes() {
if err := r.createOrUpdateInstancetype(instancetype.DeepCopy()); err != nil {
return err
}
}
return nil
}
func (r *Reconciler) findInstancetype(name string) (*instancetypev1beta1.VirtualMachineClusterInstancetype, error) {
obj, exists, err := r.stores.ClusterInstancetype.GetByKey(name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1.Resource("VirtualMachineClusterInstancetype"), name)
}
foundObj, ok := obj.(*instancetypev1beta1.VirtualMachineClusterInstancetype)
if !ok {
return nil, fmt.Errorf("unknown object within VirtualMachineClusterInstancetype store")
}
return foundObj, nil
}
func (r *Reconciler) createOrUpdateInstancetype(instancetype *instancetypev1beta1.VirtualMachineClusterInstancetype) error {
foundObj, err := r.findInstancetype(instancetype.Name)
if err != nil && !errors.IsNotFound(err) {
return err
}
imageTag, imageRegistry, id := getTargetVersionRegistryID(r.kv)
injectOperatorMetadata(r.kv, &instancetype.ObjectMeta, imageTag, imageRegistry, id, true)
if errors.IsNotFound(err) {
if _, err := r.clientset.VirtualMachineClusterInstancetype().Create(context.Background(), instancetype, metav1.CreateOptions{}); err != nil {
return fmt.Errorf("unable to create instancetype %+v: %v", instancetype, err)
}
log.Log.V(2).Infof("instancetype %v created", instancetype.GetName())
return nil
}
if equality.Semantic.DeepEqual(foundObj.Annotations, instancetype.Annotations) &&
equality.Semantic.DeepEqual(foundObj.Labels, instancetype.Labels) &&
equality.Semantic.DeepEqual(foundObj.Spec, instancetype.Spec) {
log.Log.V(4).Infof("instancetype %v is up-to-date", instancetype.GetName())
return nil
}
instancetype.ResourceVersion = foundObj.ResourceVersion
if _, err := r.clientset.VirtualMachineClusterInstancetype().Update(context.Background(), instancetype, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("unable to update instancetype %+v: %v", instancetype, err)
}
log.Log.V(2).Infof("instancetype %v updated", instancetype.GetName())
return nil
}
func (r *Reconciler) deleteInstancetypes() error {
foundInstancetype := false
for _, instancetype := range r.targetStrategy.Instancetypes() {
_, exists, err := r.stores.ClusterInstancetype.GetByKey(instancetype.Name)
if err != nil {
return err
}
if exists {
foundInstancetype = true
break
}
}
if !foundInstancetype {
return nil
}
ls := labels.Set{
v1.AppComponentLabel: GetAppComponent(r.kv),
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
}
if err := r.clientset.VirtualMachineClusterInstancetype().DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{
LabelSelector: ls.String(),
}); err != nil {
return fmt.Errorf("unable to delete preferences: %v", err)
}
return nil
}
func (r *Reconciler) createOrUpdatePreferences() error {
for _, preference := range r.targetStrategy.Preferences() {
if err := r.createOrUpdatePreference(preference.DeepCopy()); err != nil {
return err
}
}
return nil
}
func (r *Reconciler) findPreference(name string) (*instancetypev1beta1.VirtualMachineClusterPreference, error) {
obj, exists, err := r.stores.ClusterPreference.GetByKey(name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1.Resource("VirtualMachineClusterPreference"), name)
}
foundObj, ok := obj.(*instancetypev1beta1.VirtualMachineClusterPreference)
if !ok {
return nil, fmt.Errorf("unknown object within VirtualMachineClusterPreference store")
}
return foundObj, nil
}
func (r *Reconciler) createOrUpdatePreference(preference *instancetypev1beta1.VirtualMachineClusterPreference) error {
foundObj, err := r.findPreference(preference.Name)
if err != nil && !errors.IsNotFound(err) {
return err
}
imageTag, imageRegistry, id := getTargetVersionRegistryID(r.kv)
injectOperatorMetadata(r.kv, &preference.ObjectMeta, imageTag, imageRegistry, id, true)
if errors.IsNotFound(err) {
if _, err := r.clientset.VirtualMachineClusterPreference().Create(context.Background(), preference, metav1.CreateOptions{}); err != nil {
return fmt.Errorf("unable to create preference %+v: %v", preference, err)
}
log.Log.V(2).Infof("preference %v created", preference.GetName())
return nil
}
if equality.Semantic.DeepEqual(foundObj.Annotations, preference.Annotations) &&
equality.Semantic.DeepEqual(foundObj.Labels, preference.Labels) &&
equality.Semantic.DeepEqual(foundObj.Spec, preference.Spec) {
log.Log.V(4).Infof("preference %v is up-to-date", preference.GetName())
return nil
}
preference.ResourceVersion = foundObj.ResourceVersion
if _, err := r.clientset.VirtualMachineClusterPreference().Update(context.Background(), preference, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("unable to update preference %+v: %v", preference, err)
}
log.Log.V(2).Infof("preference %v updated", preference.GetName())
return nil
}
func (r *Reconciler) deletePreferences() error {
foundPreference := false
for _, preference := range r.targetStrategy.Preferences() {
_, exists, err := r.stores.ClusterPreference.GetByKey(preference.Name)
if err != nil {
return err
}
if exists {
foundPreference = true
break
}
}
if !foundPreference {
return nil
}
ls := labels.Set{
v1.AppComponentLabel: GetAppComponent(r.kv),
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
}
if err := r.clientset.VirtualMachineClusterPreference().DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{
LabelSelector: ls.String(),
}); err != nil {
return fmt.Errorf("unable to delete preferences: %v", err)
}
return nil
}
package apply
import (
// #nosec sha1 is used to calculate a hash for patches and not for cryptographic purposes
"crypto/sha1"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"reflect"
"sort"
"strings"
jsonpatch "github.com/evanphx/json-patch"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/strategicpatch"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/components"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/install"
)
type Customizer struct {
Patches []v1.CustomizeComponentsPatch
hash string
}
func NewCustomizer(customizations v1.CustomizeComponents) (*Customizer, error) {
hash, err := getHash(customizations)
if err != nil {
return &Customizer{}, err
}
patches := customizations.Patches
flagPatches := flagsToPatches(customizations.Flags)
patches = append(patches, flagPatches...)
return &Customizer{
Patches: patches,
hash: hash,
}, nil
}
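// flagsToPatches renders the per-component flag overrides into strategic merge patches
// against the virt-api and virt-controller Deployments and the virt-handler DaemonSet.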
func flagsToPatches(flags *v1.Flags) []v1.CustomizeComponentsPatch {
patches := []v1.CustomizeComponentsPatch{}
if flags == nil {
return patches
}
patches = addFlagsPatch(components.VirtAPIName, "Deployment", flags.API, patches)
patches = addFlagsPatch(components.VirtControllerName, "Deployment", flags.Controller, patches)
patches = addFlagsPatch(components.VirtHandlerName, "DaemonSet", flags.Handler, patches)
return patches
}
func addFlagsPatch(name, resource string, flags map[string]string, patches []v1.CustomizeComponentsPatch) []v1.CustomizeComponentsPatch {
if len(flags) == 0 {
return patches
}
return append(patches, v1.CustomizeComponentsPatch{
ResourceName: name,
ResourceType: resource,
Patch: fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":%q,"command":["%s","%s"]}]}}}}`, name, name, strings.Join(flagsToArray(flags), `","`)),
Type: v1.StrategicMergePatchType,
})
}
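// flagsToArray converts a flag map into a sorted, lower-cased argument list. For
// example, {"debug": "", "dry-run": "true", "port": "8443"} becomes
// ["--debug", "--dry-run=true", "--port", "8443"].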
func flagsToArray(flags map[string]string) []string {
farr := make([]string, 0)
fnames := make([]string, 0, len(flags))
for flag := range flags {
fnames = append(fnames, flag)
}
sort.Strings(fnames)
for _, flag := range fnames {
flagName := strings.ToLower(flag)
val := strings.ToLower(flags[flag])
switch val {
case "":
farr = append(farr, fmt.Sprintf("--%s", flagName))
case "true", "false":
farr = append(farr, fmt.Sprintf("--%s=%s", flagName, val))
default:
farr = append(farr, fmt.Sprintf("--%s", flagName))
farr = append(farr, val)
}
}
return farr
}
func (c *Customizer) Hash() string {
return c.hash
}
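// GenericApplyPatches applies the matching customization patches to every runtime.Object
// in the given slice and stamps each object with the customizer hash annotation.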
func (c *Customizer) GenericApplyPatches(objects interface{}) error {
switch reflect.TypeOf(objects).Kind() {
case reflect.Slice:
s := reflect.ValueOf(objects)
for i := 0; i < s.Len(); i++ {
o := s.Index(i)
obj, ok := o.Interface().(runtime.Object)
if !ok {
return errors.New("Slice must contain objects of type 'runtime.Object'")
}
kind := obj.GetObjectKind().GroupVersionKind().Kind
v := reflect.Indirect(o).FieldByName("ObjectMeta").FieldByName("Name")
name := v.String()
patches := c.GetPatchesForResource(kind, name)
patches = append(patches, v1.CustomizeComponentsPatch{
Patch: fmt.Sprintf(`{"metadata":{"annotations":{"%s":"%s"}}}`, v1.KubeVirtCustomizeComponentAnnotationHash, c.hash),
Type: v1.StrategicMergePatchType,
})
err := applyPatches(obj, patches)
if err != nil {
return err
}
}
}
return nil
}
func (c *Customizer) Apply(targetStrategy install.StrategyInterface) error {
err := c.GenericApplyPatches(targetStrategy.Deployments())
if err != nil {
return err
}
err = c.GenericApplyPatches(targetStrategy.Services())
if err != nil {
return err
}
err = c.GenericApplyPatches(targetStrategy.DaemonSets())
if err != nil {
return err
}
err = c.GenericApplyPatches(targetStrategy.ValidatingWebhookConfigurations())
if err != nil {
return err
}
err = c.GenericApplyPatches(targetStrategy.MutatingWebhookConfigurations())
if err != nil {
return err
}
err = c.GenericApplyPatches(targetStrategy.APIServices())
if err != nil {
return err
}
err = c.GenericApplyPatches(targetStrategy.CertificateSecrets())
if err != nil {
return err
}
return nil
}
func applyPatches(obj runtime.Object, patches []v1.CustomizeComponentsPatch) error {
if len(patches) == 0 {
return nil
}
for _, p := range patches {
err := applyPatch(obj, p)
if err != nil {
return err
}
}
return nil
}
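// applyPatch round-trips obj through JSON, applying a single JSON, merge or strategic
// merge patch and unmarshalling the result back into the (reset) object.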
func applyPatch(obj runtime.Object, patch v1.CustomizeComponentsPatch) error {
if obj == nil {
return nil
}
old, err := json.Marshal(obj)
if err != nil {
return err
}
// reset the object in preparation for unmarshalling, since unmarshalling does not
// guarantee that fields removed by the patch are cleared in obj
value := reflect.ValueOf(obj)
value.Elem().Set(reflect.New(value.Type().Elem()).Elem())
switch patch.Type {
case v1.JSONPatchType:
patch, err := jsonpatch.DecodePatch([]byte(patch.Patch))
if err != nil {
return err
}
modified, err := patch.Apply(old)
if err != nil {
return err
}
if err = json.Unmarshal(modified, obj); err != nil {
return err
}
case v1.MergePatchType:
modified, err := jsonpatch.MergePatch(old, []byte(patch.Patch))
if err != nil {
return err
}
if err := json.Unmarshal(modified, obj); err != nil {
return err
}
case v1.StrategicMergePatchType:
mergedByte, err := strategicpatch.StrategicMergePatch(old, []byte(patch.Patch), obj)
if err != nil {
return err
}
if err = json.Unmarshal(mergedByte, obj); err != nil {
return err
}
default:
return fmt.Errorf("PatchType is not supported")
}
return nil
}
func (c *Customizer) GetPatches() []v1.CustomizeComponentsPatch {
return c.Patches
}
func (c *Customizer) GetPatchesForResource(resourceType, name string) []v1.CustomizeComponentsPatch {
allPatches := c.Patches
patches := make([]v1.CustomizeComponentsPatch, 0)
for _, p := range allPatches {
if valueMatchesKey(p.ResourceType, resourceType) && valueMatchesKey(p.ResourceName, name) {
patches = append(patches, p)
}
}
return patches
}
func valueMatchesKey(value, key string) bool {
if value == "*" {
return true
}
return strings.EqualFold(key, value)
}
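// getHash returns a hex-encoded SHA-1 digest of the customizations. Patches are sorted
// by length first so that reordering patches of different lengths does not change the
// digest.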
func getHash(customizations v1.CustomizeComponents) (string, error) {
// #nosec CWE: 326 - Use of weak cryptographic primitive (http://cwe.mitre.org/data/definitions/326.html)
// reason: sha1 is not used for encryption but for creating a hash value
hasher := sha1.New()
sort.SliceStable(customizations.Patches, func(i, j int) bool {
return len(customizations.Patches[i].Patch) < len(customizations.Patches[j].Patch)
})
values, err := json.Marshal(customizations)
if err != nil {
return "", err
}
hasher.Write(values)
return hex.EncodeToString(hasher.Sum(nil)), nil
}
package apply
import (
"context"
"fmt"
"dario.cat/mergo"
"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
)
func (r *Reconciler) createOrUpdateServiceMonitors() error {
if !r.config.ServiceMonitorEnabled {
return nil
}
for _, serviceMonitor := range r.targetStrategy.ServiceMonitors() {
if err := r.createOrUpdateServiceMonitor(serviceMonitor.DeepCopy()); err != nil {
return err
}
}
return nil
}
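// createOrUpdateServiceMonitor creates the ServiceMonitor if it is not cached yet and
// otherwise JSON-patches its metadata and spec when either drifts from the target state.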
func (r *Reconciler) createOrUpdateServiceMonitor(serviceMonitor *promv1.ServiceMonitor) error {
prometheusClient := r.clientset.PrometheusClient()
version, imageRegistry, id := getTargetVersionRegistryID(r.kv)
obj, exists, _ := r.stores.ServiceMonitorCache.Get(serviceMonitor)
injectOperatorMetadata(r.kv, &serviceMonitor.ObjectMeta, version, imageRegistry, id, true)
if !exists {
// Create non-existent object
r.expectations.ServiceMonitor.RaiseExpectations(r.kvKey, 1, 0)
_, err := prometheusClient.MonitoringV1().ServiceMonitors(serviceMonitor.Namespace).Create(context.Background(), serviceMonitor, metav1.CreateOptions{})
if err != nil {
r.expectations.ServiceMonitor.LowerExpectations(r.kvKey, 1, 0)
return fmt.Errorf("unable to create serviceMonitor %+v: %v", serviceMonitor, err)
}
log.Log.V(2).Infof("serviceMonitor %v created", serviceMonitor.GetName())
return nil
}
cachedServiceMonitor := obj.(*promv1.ServiceMonitor)
endpointsModified, err := ensureServiceMonitorSpec(serviceMonitor, cachedServiceMonitor)
if err != nil {
return err
}
modified := resourcemerge.BoolPtr(false)
resourcemerge.EnsureObjectMeta(modified, &cachedServiceMonitor.ObjectMeta, serviceMonitor.ObjectMeta)
// there was no change to metadata and the spec fields are equal
if !*modified && !endpointsModified {
log.Log.V(4).Infof("serviceMonitor %v is up-to-date", serviceMonitor.GetName())
return nil
}
patchBytes, err := patch.New(getPatchWithObjectMetaAndSpec([]patch.PatchOption{}, &serviceMonitor.ObjectMeta, serviceMonitor.Spec)...).GeneratePayload()
if err != nil {
return err
}
_, err = prometheusClient.MonitoringV1().ServiceMonitors(serviceMonitor.Namespace).Patch(context.Background(), serviceMonitor.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("unable to patch serviceMonitor %+v: %v", serviceMonitor, err)
}
log.Log.V(2).Infof("serviceMonitor %v updated", serviceMonitor.GetName())
return nil
}
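// ensureServiceMonitorSpec merges the required spec into the existing one and reports
// whether the merged spec still differs from the required spec, i.e. an update is needed.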
func ensureServiceMonitorSpec(required, existing *promv1.ServiceMonitor) (bool, error) {
if err := mergo.Merge(&existing.Spec, &required.Spec); err != nil {
return false, err
}
if equality.Semantic.DeepEqual(existing.Spec, required.Spec) {
return false, nil
}
return true, nil
}
func (r *Reconciler) createOrUpdatePrometheusRules() error {
if !r.config.PrometheusRulesEnabled {
return nil
}
for _, prometheusRule := range r.targetStrategy.PrometheusRules() {
if err := r.createOrUpdatePrometheusRule(prometheusRule.DeepCopy()); err != nil {
return err
}
}
return nil
}
func (r *Reconciler) createOrUpdatePrometheusRule(prometheusRule *promv1.PrometheusRule) error {
prometheusClient := r.clientset.PrometheusClient()
version, imageRegistry, id := getTargetVersionRegistryID(r.kv)
obj, exists, _ := r.stores.PrometheusRuleCache.Get(prometheusRule)
injectOperatorMetadata(r.kv, &prometheusRule.ObjectMeta, version, imageRegistry, id, true)
if !exists {
// Create non-existent object
r.expectations.PrometheusRule.RaiseExpectations(r.kvKey, 1, 0)
_, err := prometheusClient.MonitoringV1().PrometheusRules(prometheusRule.Namespace).Create(context.Background(), prometheusRule, metav1.CreateOptions{})
if err != nil {
r.expectations.PrometheusRule.LowerExpectations(r.kvKey, 1, 0)
return fmt.Errorf("unable to create PrometheusRule %+v: %v", prometheusRule, err)
}
log.Log.V(2).Infof("PrometheusRule %v created", prometheusRule.GetName())
return nil
}
cachedPrometheusRule := obj.(*promv1.PrometheusRule)
modified := resourcemerge.BoolPtr(false)
existingCopy := cachedPrometheusRule.DeepCopy()
resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, prometheusRule.ObjectMeta)
if !*modified && equality.Semantic.DeepEqual(cachedPrometheusRule.Spec, prometheusRule.Spec) {
log.Log.V(4).Infof("PrometheusRule %v is up-to-date", prometheusRule.GetName())
return nil
}
patchBytes, err := patch.New(getPatchWithObjectMetaAndSpec([]patch.PatchOption{}, &prometheusRule.ObjectMeta, prometheusRule.Spec)...).GeneratePayload()
if err != nil {
return err
}
_, err = prometheusClient.MonitoringV1().PrometheusRules(prometheusRule.Namespace).Patch(context.Background(), prometheusRule.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("unable to patch PrometheusRule %+v: %v", prometheusRule, err)
}
log.Log.V(2).Infof("PrometheusRule %v updated", prometheusRule.GetName())
return nil
}
package apply
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/runtime"
"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
"k8s.io/client-go/tools/cache"
"kubevirt.io/kubevirt/pkg/controller"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/rbac"
)
func (r *Reconciler) createOrUpdateClusterRole(cr *rbacv1.ClusterRole, imageTag string, imageRegistry string, id string) error {
return rbacCreateOrUpdate(r, cr, imageTag, imageRegistry, id)
}
func (r *Reconciler) createOrUpdateClusterRoleBinding(crb *rbacv1.ClusterRoleBinding, imageTag string, imageRegistry string, id string) error {
return rbacCreateOrUpdate(r, crb, imageTag, imageRegistry, id)
}
func (r *Reconciler) createOrUpdateRole(role *rbacv1.Role, imageTag string, imageRegistry string, id string) error {
if !r.config.ServiceMonitorEnabled && (role.Name == rbac.MONITOR_SERVICEACCOUNT_NAME) {
return nil
}
return rbacCreateOrUpdate(r, role, imageTag, imageRegistry, id)
}
func (r *Reconciler) createOrUpdateRoleBinding(rb *rbacv1.RoleBinding, imageTag string, imageRegistry string, id string) error {
if !r.config.ServiceMonitorEnabled && (rb.Name == rbac.MONITOR_SERVICEACCOUNT_NAME) {
return nil
}
return rbacCreateOrUpdate(r, rb, imageTag, imageRegistry, id)
}
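// rbacCreateOrUpdate reconciles a single RBAC object (Role, RoleBinding, ClusterRole or
// ClusterRoleBinding): missing objects are created, and existing ones are updated when
// their metadata, rules, subjects or role refs diverge from the required state.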
func rbacCreateOrUpdate(r *Reconciler, required runtime.Object, imageTag, imageRegistry, id string) (err error) {
roleTypeName := required.GetObjectKind().GroupVersionKind().Kind
cachedRoleInterface, exists, _ := getRbacCache(r, required).Get(required)
requiredMeta := getRbacMetaObject(required)
injectOperatorMetadata(r.kv, requiredMeta, imageTag, imageRegistry, id, true)
if !exists {
// Create non-existent object
err = getRbacCreateFunction(r, required)()
if err != nil {
return fmt.Errorf("unable to create %v %+v: %v", roleTypeName, required, err)
}
log.Log.V(2).Infof("%v %v created", roleTypeName, requiredMeta.GetName())
return nil
}
metaChanged := resourcemerge.BoolPtr(false)
existingCopy := cachedRoleInterface.(runtime.Object).DeepCopyObject()
existingCopyMeta := getRbacMetaObject(existingCopy)
resourcemerge.EnsureObjectMeta(metaChanged, existingCopyMeta, *requiredMeta)
enforceAPIGroup(existingCopy, required)
specChanged := changeRbacExistingByRequired(existingCopy, required)
if !*metaChanged && !specChanged {
log.Log.V(4).Infof("%v %v already exists", roleTypeName, requiredMeta.GetName())
return nil
}
// Update existing, we don't need to patch for rbac rules.
err = getRbacUpdateFunction(r, existingCopy)()
if err != nil {
return fmt.Errorf("unable to update %v %+v: %v", roleTypeName, required, err)
}
log.Log.V(2).Infof("%v %v updated", roleTypeName, requiredMeta.GetName())
return nil
}
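// getRbacCreateFunction returns a closure that creates the given RBAC object through the
// typed client, raising the matching controller expectation and lowering it on failure.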
func getRbacCreateFunction(r *Reconciler, obj runtime.Object) (createFunc func() error) {
rbacObj := r.clientset.RbacV1()
namespace := r.kv.Namespace
raiseExpectation := func(exp *controller.UIDTrackingControllerExpectations) {
exp.RaiseExpectations(r.kvKey, 1, 0)
}
lowerExpectationIfErr := func(exp *controller.UIDTrackingControllerExpectations, err error) {
if err != nil {
exp.LowerExpectations(r.kvKey, 1, 0)
}
}
switch obj.(type) {
case *rbacv1.Role:
role := obj.(*rbacv1.Role)
createFunc = func() error {
raiseExpectation(r.expectations.Role)
_, err := rbacObj.Roles(namespace).Create(context.Background(), role, metav1.CreateOptions{})
lowerExpectationIfErr(r.expectations.Role, err)
return err
}
case *rbacv1.ClusterRole:
role := obj.(*rbacv1.ClusterRole)
createFunc = func() error {
raiseExpectation(r.expectations.ClusterRole)
_, err := rbacObj.ClusterRoles().Create(context.Background(), role, metav1.CreateOptions{})
lowerExpectationIfErr(r.expectations.ClusterRole, err)
return err
}
case *rbacv1.RoleBinding:
roleBinding := obj.(*rbacv1.RoleBinding)
createFunc = func() error {
raiseExpectation(r.expectations.RoleBinding)
_, err := rbacObj.RoleBindings(namespace).Create(context.Background(), roleBinding, metav1.CreateOptions{})
lowerExpectationIfErr(r.expectations.RoleBinding, err)
return err
}
case *rbacv1.ClusterRoleBinding:
roleBinding := obj.(*rbacv1.ClusterRoleBinding)
createFunc = func() error {
raiseExpectation(r.expectations.ClusterRoleBinding)
_, err := rbacObj.ClusterRoleBindings().Create(context.Background(), roleBinding, metav1.CreateOptions{})
lowerExpectationIfErr(r.expectations.ClusterRoleBinding, err)
return err
}
}
return
}
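// getRbacUpdateFunction returns a closure that updates the given RBAC object through the
// typed client for its concrete type.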
func getRbacUpdateFunction(r *Reconciler, obj runtime.Object) (updateFunc func() (err error)) {
rbacObj := r.clientset.RbacV1()
namespace := r.kv.Namespace
switch obj.(type) {
case *rbacv1.Role:
role := obj.(*rbacv1.Role)
updateFunc = func() (err error) {
_, err = rbacObj.Roles(namespace).Update(context.Background(), role, metav1.UpdateOptions{})
return err
}
case *rbacv1.ClusterRole:
role := obj.(*rbacv1.ClusterRole)
updateFunc = func() (err error) {
_, err = rbacObj.ClusterRoles().Update(context.Background(), role, metav1.UpdateOptions{})
return err
}
case *rbacv1.RoleBinding:
roleBinding := obj.(*rbacv1.RoleBinding)
updateFunc = func() (err error) {
_, err = rbacObj.RoleBindings(namespace).Update(context.Background(), roleBinding, metav1.UpdateOptions{})
return err
}
case *rbacv1.ClusterRoleBinding:
roleBinding := obj.(*rbacv1.ClusterRoleBinding)
updateFunc = func() (err error) {
_, err = rbacObj.ClusterRoleBindings().Update(context.Background(), roleBinding, metav1.UpdateOptions{})
return err
}
}
return
}
func getRbacMetaObject(obj runtime.Object) (meta *metav1.ObjectMeta) {
switch obj.(type) {
case *rbacv1.Role:
role := obj.(*rbacv1.Role)
meta = &role.ObjectMeta
case *rbacv1.ClusterRole:
role := obj.(*rbacv1.ClusterRole)
meta = &role.ObjectMeta
case *rbacv1.RoleBinding:
roleBinding := obj.(*rbacv1.RoleBinding)
meta = &roleBinding.ObjectMeta
case *rbacv1.ClusterRoleBinding:
roleBinding := obj.(*rbacv1.ClusterRoleBinding)
meta = &roleBinding.ObjectMeta
}
return
}
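// enforceAPIGroup normalizes the rbac.authorization.k8s.io API group on the role refs and
// User subjects of both bindings so that server-side defaulting does not produce spurious
// diffs in the comparison below.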
func enforceAPIGroup(existing runtime.Object, required runtime.Object) {
var existingRoleRef *rbacv1.RoleRef
var requiredRoleRef *rbacv1.RoleRef
var existingSubjects []rbacv1.Subject
var requiredSubjects []rbacv1.Subject
switch required.(type) {
case *rbacv1.RoleBinding:
crExisting := existing.(*rbacv1.RoleBinding)
crRequired := required.(*rbacv1.RoleBinding)
existingRoleRef = &crExisting.RoleRef
requiredRoleRef = &crRequired.RoleRef
existingSubjects = crExisting.Subjects
requiredSubjects = crRequired.Subjects
case *rbacv1.ClusterRoleBinding:
crbExisting := existing.(*rbacv1.ClusterRoleBinding)
crbRequired := required.(*rbacv1.ClusterRoleBinding)
existingRoleRef = &crbExisting.RoleRef
requiredRoleRef = &crbRequired.RoleRef
existingSubjects = crbExisting.Subjects
requiredSubjects = crbRequired.Subjects
default:
return
}
existingRoleRef.APIGroup = rbacv1.GroupName
for i := range existingSubjects {
if existingSubjects[i].Kind == "User" {
existingSubjects[i].APIGroup = rbacv1.GroupName
}
}
requiredRoleRef.APIGroup = rbacv1.GroupName
for i := range requiredSubjects {
if requiredSubjects[i].Kind == "User" {
requiredSubjects[i].APIGroup = rbacv1.GroupName
}
}
}
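// changeRbacExistingByRequired copies rules, subjects and role refs from the required
// object onto the existing one and reports whether anything changed.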
func changeRbacExistingByRequired(existing runtime.Object, required runtime.Object) (modified bool) {
// These comparisons are hand-rolled to avoid reflection, for performance reasons
arePolicyRulesEqual := func(pr1, pr2 []rbacv1.PolicyRule) bool {
if len(pr1) != len(pr2) {
return false
}
areStringListsEqual := func(strList1 []string, strList2 []string) bool {
if len(strList1) != len(strList2) {
return false
}
for i := range strList1 {
if strList1[i] != strList2[i] {
return false
}
}
return true
}
for i := range pr1 {
if !areStringListsEqual(pr1[i].Verbs, pr2[i].Verbs) || !areStringListsEqual(pr1[i].Resources, pr2[i].Resources) ||
!areStringListsEqual(pr1[i].APIGroups, pr2[i].APIGroups) || !areStringListsEqual(pr1[i].NonResourceURLs, pr2[i].NonResourceURLs) ||
!areStringListsEqual(pr1[i].ResourceNames, pr2[i].ResourceNames) {
return false
}
}
return true
}
changeExistingPolicyRulesByRequired := func(existing, required *[]rbacv1.PolicyRule) (modified bool) {
if !arePolicyRulesEqual(*existing, *required) {
*existing = *required
return true
}
return false
}
changeExistingSubjectsByRequired := func(existingSubjects, requiredSubjects *[]rbacv1.Subject) bool {
modified := false
if len(*existingSubjects) != len(*requiredSubjects) {
*existingSubjects = *requiredSubjects
return true
}
for _, existingSubject := range *existingSubjects {
found := false
for _, requiredSubject := range *requiredSubjects {
if existingSubject == requiredSubject {
found = true
break
}
}
if !found {
modified = true
break
}
}
if modified {
*existingSubjects = *requiredSubjects
}
return modified
}
changeExistingRoleRefByRequired := func(existingRoleRef, requiredRoleRef *rbacv1.RoleRef) (modified bool) {
if *existingRoleRef != *requiredRoleRef {
*existingRoleRef = *requiredRoleRef
return true
}
return false
}
switch existing.(type) {
case *rbacv1.Role:
existingRole := existing.(*rbacv1.Role)
requiredRole := required.(*rbacv1.Role)
modified = changeExistingPolicyRulesByRequired(&existingRole.Rules, &requiredRole.Rules)
case *rbacv1.ClusterRole:
existingClusterRole := existing.(*rbacv1.ClusterRole)
requiredClusterRole := required.(*rbacv1.ClusterRole)
modified = changeExistingPolicyRulesByRequired(&existingClusterRole.Rules, &requiredClusterRole.Rules)
case *rbacv1.RoleBinding:
existingRoleBinding := existing.(*rbacv1.RoleBinding)
requiredRoleBinding := required.(*rbacv1.RoleBinding)
modified = changeExistingSubjectsByRequired(&existingRoleBinding.Subjects, &requiredRoleBinding.Subjects)
modified = changeExistingRoleRefByRequired(&existingRoleBinding.RoleRef, &requiredRoleBinding.RoleRef) || modified
case *rbacv1.ClusterRoleBinding:
existingClusterRoleBinding := existing.(*rbacv1.ClusterRoleBinding)
requiredClusterRoleBinding := required.(*rbacv1.ClusterRoleBinding)
modified = changeExistingSubjectsByRequired(&existingClusterRoleBinding.Subjects, &requiredClusterRoleBinding.Subjects)
modified = changeExistingRoleRefByRequired(&existingClusterRoleBinding.RoleRef, &requiredClusterRoleBinding.RoleRef) || modified
}
return modified
}
func getRbacCache(r *Reconciler, obj runtime.Object) (cache cache.Store) {
switch obj.(type) {
case *rbacv1.Role:
cache = r.stores.RoleCache
case *rbacv1.ClusterRole:
cache = r.stores.ClusterRoleCache
case *rbacv1.RoleBinding:
cache = r.stores.RoleBindingCache
case *rbacv1.ClusterRoleBinding:
cache = r.stores.ClusterRoleBindingCache
}
return cache
}
package apply
import (
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/cache"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
)
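// backupRBACs creates ephemeral backup copies of operator-managed ClusterRoles,
// ClusterRoleBindings, Roles and RoleBindings that do not match the current target
// version, preserving the previous RBAC state while the rollout is in progress.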
func (r *Reconciler) backupRBACs() error {
// Backup existing ClusterRoles
objects := r.stores.ClusterRoleCache.List()
for _, obj := range objects {
cachedCr, ok := obj.(*rbacv1.ClusterRole)
if !ok || !needsBackup(r.kv, r.stores.ClusterRoleCache, &cachedCr.ObjectMeta) {
continue
}
imageTag, imageRegistry, id, ok := getInstallStrategyAnnotations(&cachedCr.ObjectMeta)
if !ok {
continue
}
err := r.backupRBAC(cachedCr.DeepCopy(), cachedCr.Name, string(cachedCr.UID), imageTag, imageRegistry, id)
if err != nil {
return err
}
}
// Backup existing ClusterRoleBindings
objects = r.stores.ClusterRoleBindingCache.List()
for _, obj := range objects {
cachedCrb, ok := obj.(*rbacv1.ClusterRoleBinding)
if !ok || !needsBackup(r.kv, r.stores.ClusterRoleBindingCache, &cachedCrb.ObjectMeta) {
continue
}
imageTag, imageRegistry, id, ok := getInstallStrategyAnnotations(&cachedCrb.ObjectMeta)
if !ok {
continue
}
err := r.backupRBAC(cachedCrb.DeepCopy(), cachedCrb.Name, string(cachedCrb.UID), imageTag, imageRegistry, id)
if err != nil {
return err
}
}
// Backup existing Roles
objects = r.stores.RoleCache.List()
for _, obj := range objects {
cachedCr, ok := obj.(*rbacv1.Role)
if !ok || !needsBackup(r.kv, r.stores.RoleCache, &cachedCr.ObjectMeta) {
continue
}
imageTag, imageRegistry, id, ok := getInstallStrategyAnnotations(&cachedCr.ObjectMeta)
if !ok {
continue
}
err := r.backupRBAC(cachedCr.DeepCopy(), cachedCr.Name, string(cachedCr.UID), imageTag, imageRegistry, id)
if err != nil {
return err
}
}
// Backup existing RoleBindings
objects = r.stores.RoleBindingCache.List()
for _, obj := range objects {
cachedRb, ok := obj.(*rbacv1.RoleBinding)
if !ok || !needsBackup(r.kv, r.stores.RoleBindingCache, &cachedRb.ObjectMeta) {
continue
}
imageTag, imageRegistry, id, ok := getInstallStrategyAnnotations(&cachedRb.ObjectMeta)
if !ok {
continue
}
err := r.backupRBAC(cachedRb.DeepCopy(), cachedRb.Name, string(cachedRb.UID), imageTag, imageRegistry, id)
if err != nil {
return err
}
}
return nil
}
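// backupRBAC clones the given object under a generated name, records the source UID in
// the ephemeral backup annotation, and creates the copy through the RBAC client.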
func (r *Reconciler) backupRBAC(obj runtime.Object, name, UID, imageTag, imageRegistry, id string) error {
meta := getRbacMetaObject(obj)
*meta = metav1.ObjectMeta{
GenerateName: name,
}
injectOperatorMetadata(r.kv, meta, imageTag, imageRegistry, id, true)
meta.Annotations[v1.EphemeralBackupObject] = UID
// Create backup
createRole := getRbacCreateFunction(r, obj)
err := createRole()
if err != nil {
return err
}
kind := obj.GetObjectKind().GroupVersionKind().Kind
log.Log.V(2).Infof("backup %v %v created", kind, name)
return nil
}
func shouldBackupRBACObject(kv *v1.KubeVirt, objectMeta *metav1.ObjectMeta) bool {
curVersion, curImageRegistry, curID := getTargetVersionRegistryID(kv)
if objectMatchesVersion(objectMeta, curVersion, curImageRegistry, curID, kv.GetGeneration()) {
// matches current target version already, so doesn't need backup
return false
}
if objectMeta.Annotations == nil {
return false
}
_, ok := objectMeta.Annotations[v1.EphemeralBackupObject]
if ok {
// ephemeral backup objects don't need to be backed up because
// they are the backup
return false
}
return true
}
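// needsBackup reports whether the object should be backed up: it must qualify per
// shouldBackupRBACObject, carry install strategy annotations, and not already have a
// matching ephemeral backup in the cache.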
func needsBackup(kv *v1.KubeVirt, cache cache.Store, meta *metav1.ObjectMeta) bool {
shouldBackup := shouldBackupRBACObject(kv, meta)
imageTag, imageRegistry, id, ok := getInstallStrategyAnnotations(meta)
if !shouldBackup || !ok {
return false
}
// loop through cache and determine if there's an ephemeral backup
// for this object already
objects := cache.List()
for _, obj := range objects {
cachedRuntimeObj, ok := obj.(runtime.Object)
if !ok {
continue
}
// the RBAC caches store concrete objects (e.g. *rbacv1.Role), not bare ObjectMeta,
// so extract the metadata via the shared helper
cachedObjMeta := getRbacMetaObject(cachedRuntimeObj)
if cachedObjMeta == nil ||
cachedObjMeta.DeletionTimestamp != nil ||
meta.Annotations == nil {
continue
}
uid, ok := cachedObjMeta.Annotations[v1.EphemeralBackupObject]
if !ok {
// this is not an ephemeral backup object
continue
}
if uid == string(meta.UID) && objectMatchesVersion(cachedObjMeta, imageTag, imageRegistry, id, kv.GetGeneration()) {
// found backup. UID matches and versions match
// note, it's possible for a single UID to have multiple backups with
// different versions
return false
}
}
return true
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package apply
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"github.com/coreos/go-semver/semver"
secv1 "github.com/openshift/api/security/v1"
promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
apiregv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/certificates/triple"
"kubevirt.io/kubevirt/pkg/certificates/triple/cert"
"kubevirt.io/kubevirt/pkg/controller"
"kubevirt.io/kubevirt/pkg/virt-config/featuregate"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/install"
"kubevirt.io/kubevirt/pkg/virt-operator/util"
)
const Duration7d = time.Hour * 24 * 7
const Duration1d = time.Hour * 24
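// objectMatchesVersion reports whether the object's install strategy annotations,
// generation annotation and managed-by label match the given version, registry,
// deployment id and generation.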
func objectMatchesVersion(objectMeta *metav1.ObjectMeta, version, imageRegistry, id string, generation int64) bool {
if objectMeta.Annotations == nil {
return false
}
foundVersion, foundImageRegistry, foundID, _ := getInstallStrategyAnnotations(objectMeta)
foundGeneration, generationExists := objectMeta.Annotations[v1.KubeVirtGenerationAnnotation]
foundLabels := util.IsManagedByOperator(objectMeta.Labels)
sGeneration := strconv.FormatInt(generation, 10)
if generationExists && foundGeneration != sGeneration {
return false
}
if foundVersion == version && foundImageRegistry == imageRegistry && foundID == id && foundLabels {
return true
}
return false
}
func injectOperatorMetadata(kv *v1.KubeVirt, objectMeta *metav1.ObjectMeta, version string, imageRegistry string, id string, injectCustomizationMetadata bool) {
if objectMeta.Labels == nil {
objectMeta.Labels = make(map[string]string)
}
if kv.Spec.ProductVersion != "" && util.IsValidLabel(kv.Spec.ProductVersion) {
objectMeta.Labels[v1.AppVersionLabel] = kv.Spec.ProductVersion
}
if kv.Spec.ProductName != "" && util.IsValidLabel(kv.Spec.ProductName) {
objectMeta.Labels[v1.AppPartOfLabel] = kv.Spec.ProductName
}
objectMeta.Labels[v1.AppComponentLabel] = GetAppComponent(kv)
objectMeta.Labels[v1.ManagedByLabel] = v1.ManagedByLabelOperatorValue
if objectMeta.Annotations == nil {
objectMeta.Annotations = make(map[string]string)
}
objectMeta.Annotations[v1.InstallStrategyVersionAnnotation] = version
objectMeta.Annotations[v1.InstallStrategyRegistryAnnotation] = imageRegistry
objectMeta.Annotations[v1.InstallStrategyIdentifierAnnotation] = id
if injectCustomizationMetadata {
objectMeta.Annotations[v1.KubeVirtGenerationAnnotation] = strconv.FormatInt(kv.ObjectMeta.GetGeneration(), 10)
}
}
func GetAppComponent(kv *v1.KubeVirt) string {
if kv.Spec.ProductComponent != "" && util.IsValidLabel(kv.Spec.ProductComponent) {
return kv.Spec.ProductComponent
}
return v1.AppComponent
}
func createLabelsAndAnnotationsPatch(objectMeta *metav1.ObjectMeta) []patch.PatchOption {
return []patch.PatchOption{patch.WithAdd("/metadata/labels", objectMeta.Labels),
patch.WithAdd("/metadata/annotations", objectMeta.Annotations),
patch.WithAdd("/metadata/ownerReferences", objectMeta.OwnerReferences)}
}
func getPatchWithObjectMetaAndSpec(ops []patch.PatchOption, meta *metav1.ObjectMeta, spec interface{}) []patch.PatchOption {
// Add Labels and Annotations Patches
ops = append(ops, createLabelsAndAnnotationsPatch(meta)...)
// and spec replacement to patch
return append(ops, patch.WithReplace("/spec", spec))
}
func shouldTakeUpdatePath(targetVersion, currentVersion string) bool {
// if no current version, then this can't be an update
if currentVersion == "" {
return false
}
// semver doesn't like the 'v' prefix
targetVersion = strings.TrimPrefix(targetVersion, "v")
currentVersion = strings.TrimPrefix(currentVersion, "v")
// our default position is that this is an update.
// So if the target and current version do not
// adhere to the semver spec, we assume by default the
// update path is the correct path.
shouldTakeUpdatePath := true
target, err := semver.NewVersion(targetVersion)
if err == nil {
current, err := semver.NewVersion(currentVersion)
if err == nil {
if target.Compare(*current) <= 0 {
shouldTakeUpdatePath = false
}
}
}
return shouldTakeUpdatePath
}
func haveApiDeploymentsRolledOver(targetStrategy install.StrategyInterface, kv *v1.KubeVirt, stores util.Stores) bool {
for _, deployment := range targetStrategy.ApiDeployments() {
if !util.DeploymentIsReady(kv, deployment, stores) {
log.Log.V(2).Infof("Waiting on deployment %v to roll over to latest version", deployment.GetName())
// not rolled out yet
return false
}
}
return true
}
func haveControllerDeploymentsRolledOver(targetStrategy install.StrategyInterface, kv *v1.KubeVirt, stores util.Stores) bool {
for _, deployment := range targetStrategy.ControllerDeployments() {
if !util.DeploymentIsReady(kv, deployment, stores) {
log.Log.V(2).Infof("Waiting on deployment %v to roll over to latest version", deployment.GetName())
// not rolled out yet
return false
}
}
return true
}
func haveExportProxyDeploymentsRolledOver(targetStrategy install.StrategyInterface, kv *v1.KubeVirt, stores util.Stores) bool {
for _, deployment := range targetStrategy.ExportProxyDeployments() {
if !util.DeploymentIsReady(kv, deployment, stores) {
log.Log.V(2).Infof("Waiting on deployment %v to roll over to latest version", deployment.GetName())
// not rolled out yet
return false
}
}
return true
}
func haveSynchronizationControllerDeploymentsRolledOver(targetStrategy install.StrategyInterface, kv *v1.KubeVirt, stores util.Stores) bool {
for _, deployment := range targetStrategy.SynchronizationControllerDeployments() {
if !util.DeploymentIsReady(kv, deployment, stores) {
log.Log.V(2).Infof("Waiting on deployment %v to roll over to latest version", deployment.GetName())
// not rolled out yet
return false
}
}
return true
}
func haveDaemonSetsRolledOver(targetStrategy install.StrategyInterface, kv *v1.KubeVirt, stores util.Stores) bool {
for _, daemonSet := range targetStrategy.DaemonSets() {
if !util.DaemonsetIsReady(kv, daemonSet, stores) {
log.Log.V(2).Infof("Waiting on daemonset %v to roll over to latest version", daemonSet.GetName())
// not rolled out yet
return false
}
}
return true
}
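// createDummyWebhookValidator registers a temporary blocking ValidatingWebhookConfiguration
// for CRDs that are new in this release, so that their resources cannot be created before
// the updated apiserver is online to validate them.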
func (r *Reconciler) createDummyWebhookValidator() error {
var webhooks []admissionregistrationv1.ValidatingWebhook
version, imageRegistry, id := getTargetVersionRegistryID(r.kv)
// If webhook already exists in cache, then exit.
objects := r.stores.ValidationWebhookCache.List()
for _, obj := range objects {
if webhook, ok := obj.(*admissionregistrationv1.ValidatingWebhookConfiguration); ok {
if objectMatchesVersion(&webhook.ObjectMeta, version, imageRegistry, id, r.kv.GetGeneration()) {
// already created blocking webhook for this version
return nil
}
}
}
// defaults for the dummy webhooks below: no side effects, and a Fail policy so creates
// are rejected while the fake backing service is unreachable
sideEffectNone := admissionregistrationv1.SideEffectClassNone
failurePolicy := admissionregistrationv1.Fail
for _, crd := range r.targetStrategy.CRDs() {
_, exists, _ := r.stores.OperatorCrdCache.Get(crd)
if exists {
// this CRD isn't new, it already exists in cache so we don't
// need a blocking admission webhook to wait until the new
// apiserver is active
continue
}
path := fmt.Sprintf("/fake-path/%s", crd.Name)
webhooks = append(webhooks, admissionregistrationv1.ValidatingWebhook{
Name: fmt.Sprintf("%s-tmp-validator", crd.Name),
AdmissionReviewVersions: []string{"v1", "v1beta1"},
SideEffects: &sideEffectNone,
FailurePolicy: &failurePolicy,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{crd.Spec.Group},
APIVersions: v1.ApiSupportedWebhookVersions,
Resources: []string{crd.Spec.Names.Plural},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: r.kv.Namespace,
Name: "fake-validation-service",
Path: &path,
},
},
})
}
// nothing to do here if we have no new CRDs to create webhooks for
if len(webhooks) == 0 {
return nil
}
// Set some fake signing cert bytes in for each rule so the k8s apiserver will
// allow us to create the webhook.
caKeyPair, _ := triple.NewCA("fake.kubevirt.io", time.Hour*24)
signingCertBytes := cert.EncodeCertPEM(caKeyPair.Cert)
for i := range webhooks {
webhooks[i].ClientConfig.CABundle = signingCertBytes
}
validationWebhook := &admissionregistrationv1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "virt-operator-tmp-webhook",
},
Webhooks: webhooks,
}
injectOperatorMetadata(r.kv, &validationWebhook.ObjectMeta, version, imageRegistry, id, true)
r.expectations.ValidationWebhook.RaiseExpectations(r.kvKey, 1, 0)
_, err := r.clientset.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(context.Background(), validationWebhook, metav1.CreateOptions{})
if err != nil {
r.expectations.ValidationWebhook.LowerExpectations(r.kvKey, 1, 0)
return fmt.Errorf("unable to create validation webhook: %v", err)
}
log.Log.V(2).Infof("Validation webhook created for image %s and registry %s", version, imageRegistry)
return nil
}
func getTargetVersionRegistryID(kv *v1.KubeVirt) (version string, registry string, id string) {
version = kv.Status.TargetKubeVirtVersion
registry = kv.Status.TargetKubeVirtRegistry
id = kv.Status.TargetDeploymentID
return
}
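// isServiceClusterIP reports whether the service is, or defaults to, type ClusterIP.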
func isServiceClusterIP(service *corev1.Service) bool {
if service.Spec.Type == "" || service.Spec.Type == corev1.ServiceTypeClusterIP {
return true
}
return false
}
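// Reconciler reconciles the KubeVirt install strategy against the observed cluster state
// using the informer stores, clients and expectations it is constructed with.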
type Reconciler struct {
kv *v1.KubeVirt
kvKey string
targetStrategy install.StrategyInterface
stores util.Stores
config util.OperatorConfig
clientset kubecli.KubevirtClient
aggregatorclient install.APIServiceInterface
expectations *util.Expectations
recorder record.EventRecorder
}
func NewReconciler(kv *v1.KubeVirt, targetStrategy install.StrategyInterface, stores util.Stores, config util.OperatorConfig, clientset kubecli.KubevirtClient, aggregatorclient install.APIServiceInterface, expectations *util.Expectations, recorder record.EventRecorder) (*Reconciler, error) {
kvKey, err := controller.KeyFunc(kv)
if err != nil {
return nil, err
}
customizer, err := NewCustomizer(kv.Spec.CustomizeComponents)
if err != nil {
return nil, err
}
err = customizer.Apply(targetStrategy)
if err != nil {
return nil, err
}
return &Reconciler{
kv: kv,
kvKey: kvKey,
targetStrategy: targetStrategy,
stores: stores,
config: config,
clientset: clientset,
aggregatorclient: aggregatorclient,
expectations: expectations,
recorder: recorder,
}, nil
}
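// Sync performs one reconciliation pass: it rolls out updated objects in dependency
// order, waits for the control plane to converge, and then cleans up objects that are no
// longer part of the install strategy. It returns true once reconciliation is complete.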
func (r *Reconciler) Sync(queue workqueue.TypedRateLimitingInterface[string]) (bool, error) {
// Avoid log spam by logging this issue once early instead of once for each object created
if !util.IsValidLabel(r.kv.Spec.ProductVersion) {
log.Log.Errorf("invalid kubevirt.spec.productVersion: labels must be 63 characters or less, begin and end with alphanumeric characters, and contain only dot, hyphen or underscore")
}
if !util.IsValidLabel(r.kv.Spec.ProductName) {
log.Log.Errorf("invalid kubevirt.spec.productName: labels must be 63 characters or less, begin and end with alphanumeric characters, and contain only dot, hyphen or underscore")
}
if !util.IsValidLabel(r.kv.Spec.ProductComponent) {
log.Log.Errorf("invalid kubevirt.spec.productComponent: labels must be 63 characters or less, begin and end with alphanumeric characters, and contain only dot, hyphen or underscore")
}
targetVersion := r.kv.Status.TargetKubeVirtVersion
targetImageRegistry := r.kv.Status.TargetKubeVirtRegistry
observedVersion := r.kv.Status.ObservedKubeVirtVersion
observedImageRegistry := r.kv.Status.ObservedKubeVirtRegistry
apiDeploymentsRolledOver := haveApiDeploymentsRolledOver(r.targetStrategy, r.kv, r.stores)
controllerDeploymentsRolledOver := haveControllerDeploymentsRolledOver(r.targetStrategy, r.kv, r.stores)
exportProxyEnabled := r.exportProxyEnabled()
exportProxyDeploymentsRolledOver := !exportProxyEnabled || haveExportProxyDeploymentsRolledOver(r.targetStrategy, r.kv, r.stores)
synchronizationControllerEnabled := r.isFeatureGateEnabled(featuregate.DecentralizedLiveMigration)
synchronizationControllerDeploymentRolledOver := !synchronizationControllerEnabled || haveSynchronizationControllerDeploymentsRolledOver(r.targetStrategy, r.kv, r.stores)
daemonSetsRolledOver := haveDaemonSetsRolledOver(r.targetStrategy, r.kv, r.stores)
infrastructureRolledOver := false
if apiDeploymentsRolledOver && controllerDeploymentsRolledOver && exportProxyDeploymentsRolledOver && daemonSetsRolledOver && synchronizationControllerDeploymentRolledOver {
// infrastructure has rolled over and is available
infrastructureRolledOver = true
} else if (targetVersion == observedVersion) && (targetImageRegistry == observedImageRegistry) {
// infrastructure was observed to have rolled over successfully
// in the past
infrastructureRolledOver = true
}
// -------- CREATE AND ROLL OUT UPDATED OBJECTS --------
// creates a blocking webhook for any new CRDs that don't exist previously.
// this webhook is removed once the new apiserver is online.
if !apiDeploymentsRolledOver {
err := r.createDummyWebhookValidator()
if err != nil {
return false, err
}
} else {
err := deleteDummyWebhookValidators(r.kv, r.clientset, r.stores, r.expectations)
if err != nil {
return false, err
}
}
// create/update CRDs
err := r.createOrUpdateCrds()
if err != nil {
return false, err
}
// create/update serviceMonitor
err = r.createOrUpdateServiceMonitors()
if err != nil {
return false, err
}
// create/update PrometheusRules
err = r.createOrUpdatePrometheusRules()
if err != nil {
return false, err
}
// backup any old RBAC rules that don't match current version
if !infrastructureRolledOver {
err = r.backupRBACs()
if err != nil {
return false, err
}
}
// create/update all RBAC rules
err = r.createOrUpdateRbac()
if err != nil {
return false, err
}
// create/update SCCs
err = r.createOrUpdateSCC()
if err != nil {
return false, err
}
// create/update Services
pending, err := r.createOrUpdateServices()
if err != nil {
return false, err
} else if pending {
// waiting on multi-step service change.
// During an update, if the 'type' of the service changes then
// we have to delete the service, wait for the deletion to be observed,
// then create the new service. This is because a service's "type" is
// not mutable.
return false, nil
}
err = r.createOrUpdateValidatingAdmissionPolicyBindings()
if err != nil {
return false, err
}
err = r.createOrUpdateValidatingAdmissionPolicies()
if err != nil {
return false, err
}
err = r.createOrUpdateComponentsWithCertificates(queue)
if err != nil {
return false, err
}
if infrastructureRolledOver {
err = r.removeKvServiceAccountsFromDefaultSCC(r.kv.Namespace)
if err != nil {
return false, err
}
}
if shouldTakeUpdatePath(targetVersion, observedVersion) {
finished, err := r.updateKubeVirtSystem(controllerDeploymentsRolledOver)
if !finished || err != nil {
return false, err
}
} else {
finished, err := r.createOrRollBackSystem(apiDeploymentsRolledOver)
if !finished || err != nil {
return false, err
}
}
err = r.syncKubevirtNamespaceLabels()
if err != nil {
return false, err
}
if !infrastructureRolledOver {
// still waiting on roll out before cleaning up.
return false, nil
}
// -------- ROLLOUT INCOMPATIBLE CHANGES WHICH REQUIRE A FULL CONTROL PLANE ROLL OVER --------
// some changes can only be done after the control plane rolled over
err = r.rolloutNonCompatibleCRDChanges()
if err != nil {
return false, err
}
// -------- CLEAN UP OLD UNUSED OBJECTS --------
// outdated webhooks can potentially block deletes of other objects during the cleanup and need to be removed first
err = r.deleteObjectsNotInInstallStrategy()
if err != nil {
return false, err
}
if err := r.updateSynchronizationAddress(); err != nil {
return false, err
}
if r.commonInstancetypesDeploymentEnabled() {
if err := r.createOrUpdateInstancetypes(); err != nil {
return false, err
}
if err := r.createOrUpdatePreferences(); err != nil {
return false, err
}
} else {
if err := r.deleteInstancetypes(); err != nil {
return false, err
}
if err := r.deletePreferences(); err != nil {
return false, err
}
}
return true, nil
}
func (r *Reconciler) createOrRollBackSystem(apiDeploymentsRolledOver bool) (bool, error) {
// CREATE/ROLLBACK PATH IS
// 1. apiserver - ensures validation of objects occurs before allowing any control plane component to act on them.
// 2. wait for apiservers to roll over
// 3. controllers and daemonsets
// create/update API Deployments
for _, deployment := range r.targetStrategy.ApiDeployments() {
deployment, err := r.syncDeployment(deployment)
if err != nil {
return false, err
}
err = r.syncPodDisruptionBudgetForDeployment(deployment)
if err != nil {
return false, err
}
}
// wait on api servers to roll over
if !apiDeploymentsRolledOver {
// not rolled out yet
return false, nil
}
// create/update Controller Deployments
for _, deployment := range r.targetStrategy.ControllerDeployments() {
deployment, err := r.syncDeployment(deployment)
if err != nil {
return false, err
}
err = r.syncPodDisruptionBudgetForDeployment(deployment)
if err != nil {
return false, err
}
}
// create/update ExportProxy Deployments
for _, deployment := range r.targetStrategy.ExportProxyDeployments() {
if r.exportProxyEnabled() {
deployment, err := r.syncDeployment(deployment)
if err != nil {
return false, err
}
err = r.syncPodDisruptionBudgetForDeployment(deployment)
if err != nil {
return false, err
}
} else if err := r.deleteDeployment(deployment); err != nil {
return false, err
}
}
// create/update Synchronization controller Deployments
for _, deployment := range r.targetStrategy.SynchronizationControllerDeployments() {
if r.isFeatureGateEnabled(featuregate.DecentralizedLiveMigration) {
deployment, err := r.syncDeployment(deployment)
if err != nil {
return false, err
}
err = r.syncPodDisruptionBudgetForDeployment(deployment)
if err != nil {
return false, err
}
} else if err := r.deleteDeployment(deployment); err != nil {
return false, err
}
}
// create/update Daemonsets
for _, daemonSet := range r.targetStrategy.DaemonSets() {
finished, err := r.syncDaemonSet(daemonSet)
if !finished || err != nil {
return false, err
}
}
return true, nil
}
func (r *Reconciler) deleteDeployment(deployment *appsv1.Deployment) error {
obj, exists, err := r.stores.DeploymentCache.Get(deployment)
if err != nil {
return err
}
if !exists || obj.(*appsv1.Deployment).DeletionTimestamp != nil {
return nil
}
key, err := controller.KeyFunc(deployment)
if err != nil {
return err
}
r.expectations.Deployment.AddExpectedDeletion(r.kvKey, key)
if err := r.clientset.AppsV1().Deployments(deployment.Namespace).Delete(context.Background(), deployment.Name, metav1.DeleteOptions{}); err != nil {
r.expectations.Deployment.DeletionObserved(r.kvKey, key)
return err
}
return nil
}
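// deleteObjectsNotInInstallStrategy removes operator-created objects that are absent from
// the target install strategy, handling webhooks first since outdated webhooks can block
// deletion of other objects.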
func (r *Reconciler) deleteObjectsNotInInstallStrategy() error {
gracePeriod := int64(0)
deleteOptions := metav1.DeleteOptions{
GracePeriodSeconds: &gracePeriod,
}
client := r.clientset.ExtensionsClient()
// -------- CLEAN UP OLD UNUSED OBJECTS --------
// outdated webhooks can potentially block deletes of other objects during the cleanup and need to be removed first
// remove unused validating webhooks
objects := r.stores.ValidationWebhookCache.List()
for _, obj := range objects {
if webhook, ok := obj.(*admissionregistrationv1.ValidatingWebhookConfiguration); ok && webhook.DeletionTimestamp == nil {
found := false
if strings.HasPrefix(webhook.Name, "virt-operator-tmp-webhook") {
continue
}
for _, targetWebhook := range r.targetStrategy.ValidatingWebhookConfigurations() {
if targetWebhook.Name == webhook.Name {
found = true
break
}
}
if !found {
if key, err := controller.KeyFunc(webhook); err == nil {
r.expectations.ValidationWebhook.AddExpectedDeletion(r.kvKey, key)
err := r.clientset.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.Background(), webhook.Name, deleteOptions)
if err != nil {
r.expectations.ValidationWebhook.DeletionObserved(r.kvKey, key)
log.Log.Errorf("Failed to delete webhook %+v: %v", webhook, err)
return err
}
}
}
}
}
// remove unused mutating webhooks
objects = r.stores.MutatingWebhookCache.List()
for _, obj := range objects {
if webhook, ok := obj.(*admissionregistrationv1.MutatingWebhookConfiguration); ok && webhook.DeletionTimestamp == nil {
found := false
for _, targetWebhook := range r.targetStrategy.MutatingWebhookConfigurations() {
if targetWebhook.Name == webhook.Name {
found = true
break
}
}
if !found {
if key, err := controller.KeyFunc(webhook); err == nil {
r.expectations.MutatingWebhook.AddExpectedDeletion(r.kvKey, key)
err := r.clientset.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.Background(), webhook.Name, deleteOptions)
if err != nil {
r.expectations.MutatingWebhook.DeletionObserved(r.kvKey, key)
log.Log.Errorf("Failed to delete webhook %+v: %v", webhook, err)
return err
}
}
}
}
}
// remove unused APIServices
objects = r.stores.APIServiceCache.List()
for _, obj := range objects {
if apiService, ok := obj.(*apiregv1.APIService); ok && apiService.DeletionTimestamp == nil {
found := false
for _, targetAPIService := range r.targetStrategy.APIServices() {
if targetAPIService.Name == apiService.Name {
found = true
break
}
}
if !found {
if key, err := controller.KeyFunc(apiService); err == nil {
r.expectations.APIService.AddExpectedDeletion(r.kvKey, key)
err := r.aggregatorclient.Delete(context.Background(), apiService.Name, deleteOptions)
if err != nil {
r.expectations.APIService.DeletionObserved(r.kvKey, key)
log.Log.Errorf("Failed to delete apiService %+v: %v", apiService, err)
return err
}
}
}
}
}
// remove unused Secrets
objects = r.stores.SecretCache.List()
for _, obj := range objects {
if secret, ok := obj.(*corev1.Secret); ok && secret.DeletionTimestamp == nil {
found := false
for _, targetSecret := range r.targetStrategy.CertificateSecrets() {
if targetSecret.Name == secret.Name {
found = true
break
}
}
if !found {
if key, err := controller.KeyFunc(secret); err == nil {
r.expectations.Secrets.AddExpectedDeletion(r.kvKey, key)
err := r.clientset.CoreV1().Secrets(secret.Namespace).Delete(context.Background(), secret.Name, deleteOptions)
if err != nil {
r.expectations.Secrets.DeletionObserved(r.kvKey, key)
log.Log.Errorf("Failed to delete secret %+v: %v", secret, err)
return err
}
}
}
}
}
// remove unused ConfigMaps
objects = r.stores.ConfigMapCache.List()
for _, obj := range objects {
if configMap, ok := obj.(*corev1.ConfigMap); ok && configMap.DeletionTimestamp == nil {
found := false
for _, targetConfigMap := range r.targetStrategy.ConfigMaps() {
if targetConfigMap.Name == configMap.Name {
found = true
break
}
}
if !found {
if key, err := controller.KeyFunc(configMap); err == nil {
r.expectations.ConfigMap.AddExpectedDeletion(r.kvKey, key)
err := r.clientset.CoreV1().ConfigMaps(configMap.Namespace).Delete(context.Background(), configMap.Name, deleteOptions)
if err != nil {
r.expectations.ConfigMap.DeletionObserved(r.kvKey, key)
log.Log.Errorf("Failed to delete configmap %+v: %v", configMap, err)
return err
}
}
}
}
}
// remove unused ValidatingAdmissionPolicyBinding
objects = r.stores.ValidatingAdmissionPolicyBindingCache.List()
for _, obj := range objects {
if validatingAdmissionPolicyBinding, ok := obj.(*admissionregistrationv1.ValidatingAdmissionPolicyBinding); ok && validatingAdmissionPolicyBinding.DeletionTimestamp == nil {
found := false
for _, targetValidatingAdmissionPolicyBinding := range r.targetStrategy.ValidatingAdmissionPolicyBindings() {
if targetValidatingAdmissionPolicyBinding.Name == validatingAdmissionPolicyBinding.Name {
found = true
break
}
}
if !found {
if key, err := controller.KeyFunc(validatingAdmissionPolicyBinding); err == nil {
r.expectations.ValidatingAdmissionPolicyBinding.AddExpectedDeletion(r.kvKey, key)
err := r.clientset.AdmissionregistrationV1().ValidatingAdmissionPolicyBindings().Delete(context.Background(), validatingAdmissionPolicyBinding.Name, deleteOptions)
if err != nil {
r.expectations.ValidatingAdmissionPolicyBinding.DeletionObserved(r.kvKey, key)
log.Log.Errorf("Failed to delete validatingAdmissionPolicyBinding %+v: %v", validatingAdmissionPolicyBinding, err)
return err
}
}
}
}
}
// remove unused ValidatingAdmissionPolicy
objects = r.stores.ValidatingAdmissionPolicyCache.List()
for _, obj := range objects {
if validatingAdmissionPolicy, ok := obj.(*admissionregistrationv1.ValidatingAdmissionPolicy); ok && validatingAdmissionPolicy.DeletionTimestamp == nil {
found := false
for _, targetValidatingAdmissionPolicy := range r.targetStrategy.ValidatingAdmissionPolicies() {
if targetValidatingAdmissionPolicy.Name == validatingAdmissionPolicy.Name {
found = true
break
}
}
if !found {
if key, err := controller.KeyFunc(validatingAdmissionPolicy); err == nil {
r.expectations.ValidatingAdmissionPolicy.AddExpectedDeletion(r.kvKey, key)
err := r.clientset.AdmissionregistrationV1().ValidatingAdmissionPolicies().Delete(context.Background(), validatingAdmissionPolicy.Name, deleteOptions)
if err != nil {
r.expectations.ValidatingAdmissionPolicy.DeletionObserved(r.kvKey, key)
log.Log.Errorf("Failed to delete validatingAdmissionPolicy %+v: %v", validatingAdmissionPolicy, err)
return err
}
}
}
}
}
// remove unused crds
objects = r.stores.OperatorCrdCache.List()
for _, obj := range objects {
if crd, ok := obj.(*extv1.CustomResourceDefinition); ok && crd.DeletionTimestamp == nil {
found := false
for _, targetCrd := range r.targetStrategy.CRDs() {
if targetCrd.Name == crd.Name {
found = true
break
}
}
if !found {
if key, err := controller.KeyFunc(crd); err == nil {
r.expectations.OperatorCrd.AddExpectedDeletion(r.kvKey, key)
err := client.ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), crd.Name, deleteOptions)
if err != nil {
r.expectations.OperatorCrd.DeletionObserved(r.kvKey, key)
log.Log.Errorf("Failed to delete crd %+v: %v", crd, err)
return err
}
}
}
}
}
// remove unused daemonsets
objects = r.stores.DaemonSetCache.List()
for _, obj := range objects {
if ds, ok := obj.(*appsv1.DaemonSet); ok && ds.DeletionTimestamp == nil {
found := false
for _, targetDs := range r.targetStrategy.DaemonSets() {
if targetDs.Name == ds.Name && targetDs.Namespace == ds.Namespace {
found = true
break
}
}
if !found {
if key, err := controller.KeyFunc(ds); err == nil {
r.expectations.DaemonSet.AddExpectedDeletion(r.kvKey, key)
err := r.clientset.AppsV1().DaemonSets(ds.Namespace).Delete(context.Background(), ds.Name, deleteOptions)
if err != nil {
r.expectations.DaemonSet.DeletionObserved(r.kvKey, key)
log.Log.Errorf("Failed to delete daemonset: %v", err)
return err
}
}
}
}
}
// remove unused deployments
objects = r.stores.DeploymentCache.List()
for _, obj := range objects {
if deployment, ok := obj.(*appsv1.Deployment); ok && deployment.DeletionTimestamp == nil {
found := false
for _, targetDeployment := range r.targetStrategy.Deployments() {
if targetDeployment.Name == deployment.Name && targetDeployment.Namespace == deployment.Namespace {
found = true
break
}
}
if !found {
if key, err := controller.KeyFunc(deployment); err == nil {
r.expectations.Deployment.AddExpectedDeletion(r.kvKey, key)
err := r.clientset.AppsV1().Deployments(deployment.Namespace).Delete(context.Background(), deployment.Name, deleteOptions)
if err != nil {
r.expectations.Deployment.DeletionObserved(r.kvKey, key)
log.Log.Errorf("Failed to delete deployment: %v", err)
return err
}
}
}
}
}
// remove unused services
objects = r.stores.ServiceCache.List()
for _, obj := range objects {
if svc, ok := obj.(*corev1.Service); ok && svc.DeletionTimestamp == nil {
found := false
for _, targetSvc := range r.targetStrategy.Services() {
if targetSvc.Name == svc.Name && targetSvc.Namespace == svc.Namespace {
found = true
break
}
}
if !found {
if key, err := controller.KeyFunc(svc); err == nil {
r.expectations.Service.AddExpectedDeletion(r.kvKey, key)
err := r.clientset.CoreV1().Services(svc.Namespace).Delete(context.Background(), svc.Name, deleteOptions)
if err != nil {
r.expectations.Service.DeletionObserved(r.kvKey, key)
log.Log.Errorf("Failed to delete service %+v: %v", svc, err)
return err
}
}
}
}
}
// remove unused clusterrolebindings
objects = r.stores.ClusterRoleBindingCache.List()
for _, obj := range objects {
if crb, ok := obj.(*rbacv1.ClusterRoleBinding); ok && crb.DeletionTimestamp == nil {
found := false
for _, targetCrb := range r.targetStrategy.ClusterRoleBindings() {
if targetCrb.Name == crb.Name && targetCrb.Namespace == crb.Namespace {
found = true
break
}
}
if !found {
if key, err := controller.KeyFunc(crb); err == nil {
r.expectations.ClusterRoleBinding.AddExpectedDeletion(r.kvKey, key)
err := r.clientset.RbacV1().ClusterRoleBindings().Delete(context.Background(), crb.Name, deleteOptions)
if err != nil {
r.expectations.ClusterRoleBinding.DeletionObserved(r.kvKey, key)
log.Log.Errorf("Failed to delete crb %+v: %v", crb, err)
return err
}
}
}
}
}
// remove unused clusterroles
objects = r.stores.ClusterRoleCache.List()
for _, obj := range objects {
if cr, ok := obj.(*rbacv1.ClusterRole); ok && cr.DeletionTimestamp == nil {
found := false
for _, targetCr := range r.targetStrategy.ClusterRoles() {
if targetCr.Name == cr.Name && targetCr.Namespace == cr.Namespace {
found = true
break
}
}
if !found {
if key, err := controller.KeyFunc(cr); err == nil {
r.expectations.ClusterRole.AddExpectedDeletion(r.kvKey, key)
err := r.clientset.RbacV1().ClusterRoles().Delete(context.Background(), cr.Name, deleteOptions)
if err != nil {
r.expectations.ClusterRole.DeletionObserved(r.kvKey, key)
log.Log.Errorf("Failed to delete cr %+v: %v", cr, err)
return err
}
}
}
}
}
// remove unused rolebindings
objects = r.stores.RoleBindingCache.List()
for _, obj := range objects {
if rb, ok := obj.(*rbacv1.RoleBinding); ok && rb.DeletionTimestamp == nil {
found := false
for _, targetRb := range r.targetStrategy.RoleBindings() {
if targetRb.Name == rb.Name && targetRb.Namespace == rb.Namespace {
found = true
break
}
}
if !found {
if key, err := controller.KeyFunc(rb); err == nil {
r.expectations.RoleBinding.AddExpectedDeletion(r.kvKey, key)
err := r.clientset.RbacV1().RoleBindings(rb.Namespace).Delete(context.Background(), rb.Name, deleteOptions)
if err != nil {
r.expectations.RoleBinding.DeletionObserved(r.kvKey, key)
log.Log.Errorf("Failed to delete rb %+v: %v", rb, err)
return err
}
}
}
}
}
// remove unused roles
objects = r.stores.RoleCache.List()
for _, obj := range objects {
if role, ok := obj.(*rbacv1.Role); ok && role.DeletionTimestamp == nil {
found := false
for _, targetR := range r.targetStrategy.Roles() {
if targetR.Name == role.Name && targetR.Namespace == role.Namespace {
found = true
break
}
}
if !found {
if key, err := controller.KeyFunc(role); err == nil {
r.expectations.Role.AddExpectedDeletion(r.kvKey, key)
err := r.clientset.RbacV1().Roles(role.Namespace).Delete(context.Background(), role.Name, deleteOptions)
if err != nil {
r.expectations.Role.DeletionObserved(r.kvKey, key)
log.Log.Errorf("Failed to delete role %+v: %v", role, err)
return err
}
}
}
}
}
// remove unused serviceaccounts
objects = r.stores.ServiceAccountCache.List()
for _, obj := range objects {
if sa, ok := obj.(*corev1.ServiceAccount); ok && sa.DeletionTimestamp == nil {
found := false
for _, targetSa := range r.targetStrategy.ServiceAccounts() {
if targetSa.Name == sa.Name && targetSa.Namespace == sa.Namespace {
found = true
break
}
}
if !found {
if key, err := controller.KeyFunc(sa); err == nil {
r.expectations.ServiceAccount.AddExpectedDeletion(r.kvKey, key)
err := r.clientset.CoreV1().ServiceAccounts(sa.Namespace).Delete(context.Background(), sa.Name, deleteOptions)
if err != nil {
r.expectations.ServiceAccount.DeletionObserved(r.kvKey, key)
log.Log.Errorf("Failed to delete serviceaccount %+v: %v", sa, err)
return err
}
}
}
}
}
// remove unused sccs
objects = r.stores.SCCCache.List()
for _, obj := range objects {
if scc, ok := obj.(*secv1.SecurityContextConstraints); ok && scc.DeletionTimestamp == nil {
// The informer watches all SCC objects; that cannot be narrowed because of KubeVirt updates,
// so skip any SCC that is not managed by the operator.
if !util.IsManagedByOperator(scc.GetLabels()) {
continue
}
found := false
for _, targetScc := range r.targetStrategy.SCCs() {
if targetScc.Name == scc.Name {
found = true
break
}
}
if !found {
if key, err := controller.KeyFunc(scc); err == nil {
r.expectations.SCC.AddExpectedDeletion(r.kvKey, key)
err := r.clientset.SecClient().SecurityContextConstraints().Delete(context.Background(), scc.Name, deleteOptions)
if err != nil {
r.expectations.SCC.DeletionObserved(r.kvKey, key)
log.Log.Errorf("Failed to delete SecurityContextConstraints %+v: %v", scc, err)
return err
}
}
}
}
}
// remove unused prometheus rules
objects = r.stores.PrometheusRuleCache.List()
for _, obj := range objects {
if cachePromRule, ok := obj.(*promv1.PrometheusRule); ok && cachePromRule.DeletionTimestamp == nil {
found := false
for _, targetPromRule := range r.targetStrategy.PrometheusRules() {
if targetPromRule.Name == cachePromRule.Name && targetPromRule.Namespace == cachePromRule.Namespace {
found = true
break
}
}
if !found {
if key, err := controller.KeyFunc(cachePromRule); err == nil {
r.expectations.PrometheusRule.AddExpectedDeletion(r.kvKey, key)
err := r.clientset.PrometheusClient().
MonitoringV1().
PrometheusRules(cachePromRule.Namespace).
Delete(context.Background(), cachePromRule.Name, deleteOptions)
if err != nil {
r.expectations.PrometheusRule.DeletionObserved(r.kvKey, key)
log.Log.Errorf("Failed to delete prometheusrule %+v: %v", cachePromRule, err)
return err
}
}
}
}
}
// remove unused prometheus ServiceMonitor objects
objects = r.stores.ServiceMonitorCache.List()
for _, obj := range objects {
if cacheServiceMonitor, ok := obj.(*promv1.ServiceMonitor); ok && cacheServiceMonitor.DeletionTimestamp == nil {
found := false
for _, targetServiceMonitor := range r.targetStrategy.ServiceMonitors() {
if targetServiceMonitor.Name == cacheServiceMonitor.Name && targetServiceMonitor.Namespace == cacheServiceMonitor.Namespace {
found = true
break
}
}
if !found {
if key, err := controller.KeyFunc(cacheServiceMonitor); err == nil {
r.expectations.ServiceMonitor.AddExpectedDeletion(r.kvKey, key)
err := r.clientset.PrometheusClient().
MonitoringV1().
ServiceMonitors(cacheServiceMonitor.Namespace).
Delete(context.Background(), cacheServiceMonitor.Name, deleteOptions)
if err != nil {
r.expectations.ServiceMonitor.DeletionObserved(r.kvKey, key)
log.Log.Errorf("Failed to delete prometheusServiceMonitor %+v: %v", cacheServiceMonitor, err)
return err
}
}
}
}
}
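// Select only objects that the operator itself deployed; the selector renders
// roughly as "app.kubernetes.io/component=kubevirt,app.kubernetes.io/managed-by=virt-operator"
// (illustrative; the component value comes from GetAppComponent).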
managedByVirtOperatorLabelSet := labels.Set{
v1.AppComponentLabel: GetAppComponent(r.kv),
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
}
// remove unused instancetype objects
instancetypes, err := r.clientset.VirtualMachineClusterInstancetype().List(context.Background(), metav1.ListOptions{LabelSelector: managedByVirtOperatorLabelSet.String()})
if err != nil {
log.Log.Errorf("Failed to get instancetypes: %v", err)
return err
}
for _, instancetype := range instancetypes.Items {
if instancetype.DeletionTimestamp == nil {
found := false
for _, targetInstancetype := range r.targetStrategy.Instancetypes() {
if targetInstancetype.Name == instancetype.Name {
found = true
break
}
}
if !found {
if err := r.clientset.VirtualMachineClusterInstancetype().Delete(context.Background(), instancetype.Name, metav1.DeleteOptions{}); err != nil {
log.Log.Errorf("Failed to delete instancetype %+v: %v", instancetype, err)
return err
}
}
}
}
// remove unused preference objects
preferences, err := r.clientset.VirtualMachineClusterPreference().List(context.Background(), metav1.ListOptions{LabelSelector: managedByVirtOperatorLabelSet.String()})
if err != nil {
log.Log.Errorf("Failed to get preferences: %v", err)
return err
}
for _, preference := range preferences.Items {
if preference.DeletionTimestamp == nil {
found := false
for _, targetPreference := range r.targetStrategy.Preferences() {
if targetPreference.Name == preference.Name {
found = true
break
}
}
if !found {
if err := r.clientset.VirtualMachineClusterPreference().Delete(context.Background(), preference.Name, metav1.DeleteOptions{}); err != nil {
log.Log.Errorf("Failed to delete preference %+v: %v", preference, err)
return err
}
}
}
}
return nil
}
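// isFeatureGateEnabled reports whether the given feature gate is listed in the
// KubeVirt CR's developer configuration.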
func (r *Reconciler) isFeatureGateEnabled(featureGate string) bool {
if r.kv.Spec.Configuration.DeveloperConfiguration == nil {
return false
}
for _, fg := range r.kv.Spec.Configuration.DeveloperConfiguration.FeatureGates {
if fg == featureGate {
return true
}
}
return false
}
func (r *Reconciler) exportProxyEnabled() bool {
return r.isFeatureGateEnabled(featuregate.VMExportGate)
}
func (r *Reconciler) commonInstancetypesDeploymentEnabled() bool {
config := r.kv.Spec.Configuration.CommonInstancetypesDeployment
if config != nil && config.Enabled != nil {
return *config.Enabled
}
return true
}
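// getInstallStrategyAnnotations extracts the install strategy version, registry and
// identifier annotations from the given object meta. ok is false if any of them is missing.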
func getInstallStrategyAnnotations(meta *metav1.ObjectMeta) (imageTag, imageRegistry, id string, ok bool) {
var exists bool
ok = true
imageTag, exists = meta.Annotations[v1.InstallStrategyVersionAnnotation]
if !exists {
ok = false
}
imageRegistry, exists = meta.Annotations[v1.InstallStrategyRegistryAnnotation]
if !exists {
ok = false
}
id, exists = meta.Annotations[v1.InstallStrategyIdentifierAnnotation]
if !exists {
ok = false
}
return
}
package apply
import (
"context"
"fmt"
routev1 "github.com/openshift/api/route/v1"
"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/controller"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/components"
)
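// createOrUpdateRoutes reconciles the Routes from the target strategy. It is a no-op
// on clusters that are not OpenShift, and unknown route names are an error.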
func (r *Reconciler) createOrUpdateRoutes(caBundle []byte) error {
if !r.config.IsOnOpenshift {
return nil
}
for _, route := range r.targetStrategy.Routes() {
switch route.Name {
case components.VirtExportProxyName:
return r.syncExportProxyRoute(route.DeepCopy(), caBundle)
default:
return fmt.Errorf("unknown route %s", route.Name)
}
}
return nil
}
func (r *Reconciler) syncExportProxyRoute(route *routev1.Route, caBundle []byte) error {
if !r.exportProxyEnabled() {
return r.deleteRoute(route)
}
return r.syncRoute(route, caBundle)
}
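// syncRoute creates the route if it is not cached yet, and otherwise JSON-patches it
// when its metadata or spec has drifted from the target.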
func (r *Reconciler) syncRoute(route *routev1.Route, caBundle []byte) error {
version, imageRegistry, id := getTargetVersionRegistryID(r.kv)
injectOperatorMetadata(r.kv, &route.ObjectMeta, version, imageRegistry, id, true)
route.Spec.TLS.DestinationCACertificate = string(caBundle)
var cachedRoute *routev1.Route
obj, exists, err := r.stores.RouteCache.Get(route)
if err != nil {
return err
}
if !exists {
r.expectations.Route.RaiseExpectations(r.kvKey, 1, 0)
_, err := r.clientset.RouteClient().Routes(route.Namespace).Create(context.Background(), route, metav1.CreateOptions{})
if err != nil {
r.expectations.Route.LowerExpectations(r.kvKey, 1, 0)
return fmt.Errorf("unable to create route %+v: %v", route, err)
}
return nil
}
cachedRoute = obj.(*routev1.Route).DeepCopy()
modified := resourcemerge.BoolPtr(false)
resourcemerge.EnsureObjectMeta(modified, &cachedRoute.ObjectMeta, route.ObjectMeta)
kindSame := equality.Semantic.DeepEqual(cachedRoute.Spec.To.Kind, route.Spec.To.Kind)
nameSame := equality.Semantic.DeepEqual(cachedRoute.Spec.To.Name, route.Spec.To.Name)
terminationSame := equality.Semantic.DeepEqual(cachedRoute.Spec.TLS.Termination, route.Spec.TLS.Termination)
certSame := equality.Semantic.DeepEqual(cachedRoute.Spec.TLS.DestinationCACertificate, route.Spec.TLS.DestinationCACertificate)
if !*modified && kindSame && nameSame && terminationSame && certSame {
log.Log.V(4).Infof("route %v is up-to-date", route.GetName())
return nil
}
patchBytes, err := patch.New(getPatchWithObjectMetaAndSpec([]patch.PatchOption{}, &route.ObjectMeta, route.Spec)...).GeneratePayload()
if err != nil {
return err
}
_, err = r.clientset.RouteClient().Routes(route.Namespace).Patch(context.Background(), route.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("unable to patch route %+v: %v", route, err)
}
log.Log.V(4).Infof("route %v updated", route.GetName())
return nil
}
func (r *Reconciler) deleteRoute(route *routev1.Route) error {
obj, exists, err := r.stores.RouteCache.Get(route)
if err != nil {
return err
}
if !exists || obj.(*routev1.Route).DeletionTimestamp != nil {
return nil
}
key, err := controller.KeyFunc(route)
if err != nil {
return err
}
r.expectations.Route.AddExpectedDeletion(r.kvKey, key)
if err := r.clientset.RouteClient().Routes(route.Namespace).Delete(context.Background(), route.Name, metav1.DeleteOptions{}); err != nil {
r.expectations.Route.DeletionObserved(r.kvKey, key)
return err
}
return nil
}
package apply
import (
"context"
"fmt"
secv1 "github.com/openshift/api/security/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/apimachinery/patch"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/rbac"
)
func (r *Reconciler) createOrUpdateSCC() error {
sec := r.clientset.SecClient()
if !r.config.IsOnOpenshift {
return nil
}
version, imageRegistry, id := getTargetVersionRegistryID(r.kv)
for _, scc := range r.targetStrategy.SCCs() {
var cachedSCC *secv1.SecurityContextConstraints
scc := scc.DeepCopy()
obj, exists, _ := r.stores.SCCCache.GetByKey(scc.Name)
if exists {
cachedSCC = obj.(*secv1.SecurityContextConstraints)
}
injectOperatorMetadata(r.kv, &scc.ObjectMeta, version, imageRegistry, id, true)
if !exists {
r.expectations.SCC.RaiseExpectations(r.kvKey, 1, 0)
_, err := sec.SecurityContextConstraints().Create(context.Background(), scc, metav1.CreateOptions{})
if err != nil {
r.expectations.SCC.LowerExpectations(r.kvKey, 1, 0)
return fmt.Errorf("unable to create SCC %+v: %v", scc, err)
}
log.Log.V(2).Infof("SCC %v created", scc.Name)
} else if !objectMatchesVersion(&cachedSCC.ObjectMeta, version, imageRegistry, id, r.kv.GetGeneration()) {
scc.ObjectMeta = *cachedSCC.ObjectMeta.DeepCopy()
injectOperatorMetadata(r.kv, &scc.ObjectMeta, version, imageRegistry, id, true)
_, err := sec.SecurityContextConstraints().Update(context.Background(), scc, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("Unable to update %s SecurityContextConstraints", scc.Name)
}
log.Log.V(2).Infof("SecurityContextConstraints %s updated", scc.Name)
} else {
log.Log.V(4).Infof("SCC %s is up to date", scc.Name)
}
}
return nil
}
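// removeKvServiceAccountsFromDefaultSCC drops the KubeVirt component service accounts
// from the users list of the "privileged" SCC. The test+replace pair below makes the
// patch conditional on the users list being unchanged in the meantime; the generated
// payload looks roughly like (illustrative):
//
//	[{"op":"test","path":"/users","value":[...]},{"op":"replace","path":"/users","value":[...]}]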
func (r *Reconciler) removeKvServiceAccountsFromDefaultSCC(targetNamespace string) error {
var remainingUsers []string
SCCObj, exists, err := r.stores.SCCCache.GetByKey("privileged")
if err != nil {
return err
} else if !exists {
return nil
}
SCC, ok := SCCObj.(*secv1.SecurityContextConstraints)
if !ok {
return fmt.Errorf("couldn't cast object to SecurityContextConstraints: %+v", SCCObj)
}
modified := false
kvServiceAccounts := rbac.GetKubevirtComponentsServiceAccounts(targetNamespace)
for _, acc := range SCC.Users {
if _, ok := kvServiceAccounts[acc]; !ok {
remainingUsers = append(remainingUsers, acc)
} else {
modified = true
}
}
if modified {
patchBytes, err := patch.New(
patch.WithTest("/users", SCC.Users),
patch.WithReplace("/users", remainedUsersList),
).GeneratePayload()
if err != nil {
return err
}
_, err = r.clientset.SecClient().SecurityContextConstraints().Patch(context.Background(), "privileged", types.JSONPatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("unable to patch scc: %v", err)
}
}
return nil
}
package apply
import (
"kubevirt.io/kubevirt/pkg/virt-config/featuregate"
)
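// updateKubeVirtSystem syncs the daemonsets first, then the controller, export-proxy,
// synchronization-controller and API deployments, and reports whether the whole
// rollout has finished.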
func (r *Reconciler) updateKubeVirtSystem(controllerDeploymentsRolledOver bool) (bool, error) {
// UPDATE PATH IS
// 1. daemonsets - ensures all compute nodes are updated to handle new features
// 2. wait for daemonsets to roll over
// 3. controllers - ensures control plane is ready for new features
// 4. wait for controllers to roll over
// 5. apiserver - toggles on new features.
// create/update Daemonsets
for _, daemonSet := range r.targetStrategy.DaemonSets() {
finished, err := r.syncDaemonSet(daemonSet)
if !finished || err != nil {
return false, err
}
}
// create/update Controller Deployments
for _, deployment := range r.targetStrategy.ControllerDeployments() {
deployment, err := r.syncDeployment(deployment)
if err != nil {
return false, err
}
err = r.syncPodDisruptionBudgetForDeployment(deployment)
if err != nil {
return false, err
}
}
// wait for controllers
if !controllerDeploymentsRolledOver {
// not rolled out yet
return false, nil
}
// create/update ExportProxy Deployments
for _, deployment := range r.targetStrategy.ExportProxyDeployments() {
if r.exportProxyEnabled() {
deployment, err := r.syncDeployment(deployment)
if err != nil {
return false, err
}
err = r.syncPodDisruptionBudgetForDeployment(deployment)
if err != nil {
return false, err
}
} else if err := r.deleteDeployment(deployment); err != nil {
return false, err
}
}
// create/update Synchronization controller Deployments
for _, deployment := range r.targetStrategy.SynchronizationControllerDeployments() {
if r.isFeatureGateEnabled(featuregate.DecentralizedLiveMigration) {
deployment, err := r.syncDeployment(deployment)
if err != nil {
return false, err
}
err = r.syncPodDisruptionBudgetForDeployment(deployment)
if err != nil {
return false, err
}
} else if err := r.deleteDeployment(deployment); err != nil {
return false, err
}
}
// create/update API Deployments
for _, deployment := range r.targetStrategy.ApiDeployments() {
deployment, err := r.syncDeployment(deployment)
if err != nil {
return false, err
}
err = r.syncPodDisruptionBudgetForDeployment(deployment)
if err != nil {
return false, err
}
}
return true, nil
}
package components
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiregv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
v1 "kubevirt.io/api/core/v1"
)
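// NewVirtAPIAPIServices returns one aggregated APIService per subresource group
// version, each pointing the aggregator at the virt-api service in installNamespace.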
func NewVirtAPIAPIServices(installNamespace string) []*apiregv1.APIService {
apiservices := []*apiregv1.APIService{}
for _, version := range v1.SubresourceGroupVersions {
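// APIService names follow the "<version>.<group>" convention,
// e.g. "v1.subresources.kubevirt.io".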
subresourceAggregatedApiName := version.Version + "." + version.Group
apiservices = append(apiservices, &apiregv1.APIService{
TypeMeta: metav1.TypeMeta{
APIVersion: "apiregistration.k8s.io/v1",
Kind: "APIService",
},
ObjectMeta: metav1.ObjectMeta{
Name: subresourceAggregatedApiName,
Labels: map[string]string{
v1.AppLabel: "virt-api-aggregator",
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
},
Annotations: map[string]string{
certificatesSecretAnnotationKey: VirtApiCertSecretName,
},
},
Spec: apiregv1.APIServiceSpec{
Service: &apiregv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
},
Group: version.Group,
Version: version.Version,
GroupPriorityMinimum: 1000,
VersionPriority: 15,
},
})
}
return apiservices
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package components
import (
"fmt"
"strings"
"kubevirt.io/api/clone"
clonev1alpha1 "kubevirt.io/api/clone/v1alpha1"
clonev1beta1 "kubevirt.io/api/clone/v1beta1"
"kubevirt.io/api/instancetype"
"kubevirt.io/api/migrations"
migrationsv1 "kubevirt.io/api/migrations/v1alpha1"
schedulingv1 "k8s.io/api/scheduling/v1"
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8syaml "k8s.io/apimachinery/pkg/util/yaml"
backupv1alpha1 "kubevirt.io/api/backup/v1alpha1"
virtv1 "kubevirt.io/api/core/v1"
exportv1alpha1 "kubevirt.io/api/export/v1alpha1"
exportv1beta1 "kubevirt.io/api/export/v1beta1"
instancetypev1beta1 "kubevirt.io/api/instancetype/v1beta1"
poolv1alpha1 "kubevirt.io/api/pool/v1alpha1"
poolv1beta1 "kubevirt.io/api/pool/v1beta1"
snapshotv1alpha1 "kubevirt.io/api/snapshot/v1alpha1"
snapshotv1beta1 "kubevirt.io/api/snapshot/v1beta1"
"kubevirt.io/kubevirt/pkg/pointer"
)
const (
creationTimestampJSONPath = ".metadata.creationTimestamp"
errorMessageJSONPath = ".status.error.message"
phaseJSONPath = ".status.phase"
)
var (
VIRTUALMACHINE = "virtualmachines." + virtv1.VirtualMachineInstanceGroupVersionKind.Group
VIRTUALMACHINEINSTANCE = "virtualmachineinstances." + virtv1.VirtualMachineInstanceGroupVersionKind.Group
VIRTUALMACHINEINSTANCEPRESET = "virtualmachineinstancepresets." + virtv1.VirtualMachineInstancePresetGroupVersionKind.Group
VIRTUALMACHINEINSTANCEREPLICASET = "virtualmachineinstancereplicasets." + virtv1.VirtualMachineInstanceReplicaSetGroupVersionKind.Group
VIRTUALMACHINEINSTANCEMIGRATION = "virtualmachineinstancemigrations." + virtv1.VirtualMachineInstanceMigrationGroupVersionKind.Group
KUBEVIRT = "kubevirts." + virtv1.KubeVirtGroupVersionKind.Group
VIRTUALMACHINEPOOL = "virtualmachinepools." + poolv1beta1.SchemeGroupVersion.Group
VIRTUALMACHINESNAPSHOT = "virtualmachinesnapshots." + snapshotv1beta1.SchemeGroupVersion.Group
VIRTUALMACHINESNAPSHOTCONTENT = "virtualmachinesnapshotcontents." + snapshotv1beta1.SchemeGroupVersion.Group
VIRTUALMACHINEEXPORT = "virtualmachineexports." + exportv1beta1.SchemeGroupVersion.Group
MIGRATIONPOLICY = "migrationpolicies." + migrationsv1.MigrationPolicyKind.Group
VIRTUALMACHINECLONE = "virtualmachineclones." + clone.GroupName
VIRTUALMACHINEBACKUP = "virtualmachinebackups." + backupv1alpha1.SchemeGroupVersion.Group
VIRTUALMACHINEBACKUPTRACKER = "virtualmachinebackuptrackers." + backupv1alpha1.SchemeGroupVersion.Group
)
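// addFieldsToVersion sets printer columns, subresources or a validation schema on the
// given CRD version, depending on the concrete type of each supplied field.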
func addFieldsToVersion(version *extv1.CustomResourceDefinitionVersion, fields ...interface{}) error {
for _, field := range fields {
switch v := field.(type) {
case []extv1.CustomResourceColumnDefinition:
version.AdditionalPrinterColumns = v
case *extv1.CustomResourceSubresources:
version.Subresources = v
case *extv1.CustomResourceValidation:
version.Schema = v
default:
return fmt.Errorf("cannot add field of type %T to a CustomResourceDefinitionVersion", v)
}
}
return nil
}
func addFieldsToAllVersions(crd *extv1.CustomResourceDefinition, fields ...interface{}) error {
for i := range crd.Spec.Versions {
if err := addFieldsToVersion(&crd.Spec.Versions[i], fields...); err != nil {
return err
}
}
return nil
}
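// patchValidation looks up the generated validation YAML for the CRD's singular name
// and, if present, decodes it and attaches it to the given version as its schema.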
func patchValidation(crd *extv1.CustomResourceDefinition, version *extv1.CustomResourceDefinitionVersion) error {
name := crd.Spec.Names.Singular
validation, ok := CRDsValidation[name]
if !ok {
return nil
}
crvalidation := extv1.CustomResourceValidation{}
err := k8syaml.NewYAMLToJSONDecoder(strings.NewReader(validation)).Decode(&crvalidation)
if err != nil {
return fmt.Errorf("Could not decode validation for %s, %v", name, err)
}
if err = addFieldsToVersion(version, &crvalidation); err != nil {
return err
}
return nil
}
func patchValidationForAllVersions(crd *extv1.CustomResourceDefinition) error {
for i := range crd.Spec.Versions {
if err := patchValidation(crd, &crd.Spec.Versions[i]); err != nil {
return err
}
}
return nil
}
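// newBlankCrd returns a CRD skeleton carrying only the type meta and the app label;
// callers fill in the name and spec.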
func newBlankCrd() *extv1.CustomResourceDefinition {
return &extv1.CustomResourceDefinition{
TypeMeta: metav1.TypeMeta{
APIVersion: "apiextensions.k8s.io/v1",
Kind: "CustomResourceDefinition",
},
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
}
}
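// newCRDVersions returns a mutable copy of the core API's supported versions.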
func newCRDVersions() []extv1.CustomResourceDefinitionVersion {
versions := make([]extv1.CustomResourceDefinitionVersion, len(virtv1.ApiSupportedVersions))
copy(versions, virtv1.ApiSupportedVersions)
return versions
}
func NewVirtualMachineInstanceCrd() (*extv1.CustomResourceDefinition, error) {
crd := newBlankCrd()
crd.ObjectMeta.Name = VIRTUALMACHINEINSTANCE
crd.Spec = extv1.CustomResourceDefinitionSpec{
Group: virtv1.VirtualMachineInstanceGroupVersionKind.Group,
Versions: newCRDVersions(),
Scope: "Namespaced",
Names: extv1.CustomResourceDefinitionNames{
Plural: "virtualmachineinstances",
Singular: "virtualmachineinstance",
Kind: virtv1.VirtualMachineInstanceGroupVersionKind.Kind,
ShortNames: []string{"vmi", "vmis"},
Categories: []string{
"all",
},
},
}
err := addFieldsToAllVersions(crd, []extv1.CustomResourceColumnDefinition{
{Name: "Age", Type: "date", JSONPath: creationTimestampJSONPath},
{Name: "Phase", Type: "string", JSONPath: phaseJSONPath},
{Name: "IP", Type: "string", JSONPath: ".status.interfaces[0].ipAddress"},
{Name: "NodeName", Type: "string", JSONPath: ".status.nodeName"},
{Name: "Ready", Type: "string", JSONPath: ".status.conditions[?(@.type=='Ready')].status"},
{Name: "Live-Migratable", Type: "string", JSONPath: ".status.conditions[?(@.type=='LiveMigratable')].status", Priority: 1},
{Name: "Paused", Type: "string", JSONPath: ".status.conditions[?(@.type=='Paused')].status", Priority: 1},
})
if err != nil {
return nil, err
}
if err := patchValidationForAllVersions(crd); err != nil {
return nil, err
}
return crd, nil
}
func NewVirtualMachineCrd() (*extv1.CustomResourceDefinition, error) {
crd := newBlankCrd()
crd.ObjectMeta.Name = VIRTUALMACHINE
crd.Spec = extv1.CustomResourceDefinitionSpec{
Group: virtv1.VirtualMachineGroupVersionKind.Group,
Versions: newCRDVersions(),
Scope: "Namespaced",
Names: extv1.CustomResourceDefinitionNames{
Plural: "virtualmachines",
Singular: "virtualmachine",
Kind: virtv1.VirtualMachineGroupVersionKind.Kind,
ShortNames: []string{"vm", "vms"},
Categories: []string{
"all",
},
},
}
err := addFieldsToAllVersions(crd, []extv1.CustomResourceColumnDefinition{
{Name: "Age", Type: "date", JSONPath: creationTimestampJSONPath},
{Name: "Status", Description: "Human Readable Status", Type: "string", JSONPath: ".status.printableStatus"},
{Name: "Ready", Type: "string", JSONPath: ".status.conditions[?(@.type=='Ready')].status"},
}, &extv1.CustomResourceSubresources{
Status: &extv1.CustomResourceSubresourceStatus{}})
if err != nil {
return nil, err
}
if err = patchValidationForAllVersions(crd); err != nil {
return nil, err
}
return crd, nil
}
func NewPresetCrd() (*extv1.CustomResourceDefinition, error) {
crd := newBlankCrd()
crd.ObjectMeta.Name = VIRTUALMACHINEINSTANCEPRESET
crd.Spec = extv1.CustomResourceDefinitionSpec{
Group: virtv1.VirtualMachineInstancePresetGroupVersionKind.Group,
Versions: []extv1.CustomResourceDefinitionVersion{
{
Name: "v1",
Served: true,
Storage: false,
Deprecated: true,
DeprecationWarning: pointer.P("kubevirt.io/v1 VirtualMachineInstancePresets is now deprecated and will be removed in v2."),
},
{
Name: "v1alpha3",
Served: true,
Storage: true,
Deprecated: true,
DeprecationWarning: pointer.P("kubevirt.io/v1alpha3 VirtualMachineInstancePresets is now deprecated and will be removed in v2."),
},
},
Scope: "Namespaced",
Names: extv1.CustomResourceDefinitionNames{
Plural: "virtualmachineinstancepresets",
Singular: "virtualmachineinstancepreset",
Kind: virtv1.VirtualMachineInstancePresetGroupVersionKind.Kind,
ShortNames: []string{"vmipreset", "vmipresets"},
Categories: []string{
"all",
},
},
}
if err := patchValidationForAllVersions(crd); err != nil {
return nil, err
}
return crd, nil
}
func NewReplicaSetCrd() (*extv1.CustomResourceDefinition, error) {
crd := newBlankCrd()
labelSelector := ".status.labelSelector"
crd.ObjectMeta.Name = VIRTUALMACHINEINSTANCEREPLICASET
crd.Spec = extv1.CustomResourceDefinitionSpec{
Group: virtv1.VirtualMachineInstanceReplicaSetGroupVersionKind.Group,
Versions: newCRDVersions(),
Scope: "Namespaced",
Names: extv1.CustomResourceDefinitionNames{
Plural: "virtualmachineinstancereplicasets",
Singular: "virtualmachineinstancereplicaset",
Kind: virtv1.VirtualMachineInstanceReplicaSetGroupVersionKind.Kind,
ShortNames: []string{"vmirs", "vmirss"},
Categories: []string{
"all",
},
},
}
err := addFieldsToAllVersions(crd,
[]extv1.CustomResourceColumnDefinition{
{Name: "Desired", Type: "integer", JSONPath: ".spec.replicas",
Description: "Number of desired VirtualMachineInstances"},
{Name: "Current", Type: "integer", JSONPath: ".status.replicas",
Description: "Number of managed and not final or deleted VirtualMachineInstances"},
{Name: "Ready", Type: "integer", JSONPath: ".status.readyReplicas",
Description: "Number of managed VirtualMachineInstances which are ready to receive traffic"},
{Name: "Age", Type: "date", JSONPath: creationTimestampJSONPath},
}, &extv1.CustomResourceSubresources{
Scale: &extv1.CustomResourceSubresourceScale{
SpecReplicasPath: ".spec.replicas",
StatusReplicasPath: ".status.replicas",
LabelSelectorPath: &labelSelector,
},
Status: &extv1.CustomResourceSubresourceStatus{},
})
if err != nil {
return nil, err
}
if err := patchValidationForAllVersions(crd); err != nil {
return nil, err
}
return crd, nil
}
func NewVirtualMachineInstanceMigrationCrd() (*extv1.CustomResourceDefinition, error) {
crd := newBlankCrd()
crd.ObjectMeta.Name = VIRTUALMACHINEINSTANCEMIGRATION
crd.Spec = extv1.CustomResourceDefinitionSpec{
Group: virtv1.VirtualMachineInstanceMigrationGroupVersionKind.Group,
Versions: newCRDVersions(),
Scope: "Namespaced",
Names: extv1.CustomResourceDefinitionNames{
Plural: "virtualmachineinstancemigrations",
Singular: "virtualmachineinstancemigration",
Kind: virtv1.VirtualMachineInstanceMigrationGroupVersionKind.Kind,
ShortNames: []string{"vmim", "vmims"},
Categories: []string{
"all",
},
},
}
err := addFieldsToAllVersions(crd,
[]extv1.CustomResourceColumnDefinition{
{Name: "Phase", Type: "string", JSONPath: phaseJSONPath,
Description: "The current phase of VM instance migration"},
{Name: "VMI", Type: "string", JSONPath: ".spec.vmiName",
Description: "The name of the VMI to perform the migration on"},
}, &extv1.CustomResourceSubresources{
Status: &extv1.CustomResourceSubresourceStatus{},
})
if err != nil {
return nil, err
}
if err = patchValidationForAllVersions(crd); err != nil {
return nil, err
}
return crd, nil
}
// Used by manifest generation
// If you change something here, you probably need to change the CSV manifest too,
// see /manifests/release/kubevirt.VERSION.csv.yaml.in
func NewKubeVirtCrd() (*extv1.CustomResourceDefinition, error) {
// we use a different label here, so no newBlankCrd()
crd := &extv1.CustomResourceDefinition{
TypeMeta: metav1.TypeMeta{
APIVersion: "apiextensions.k8s.io/v1",
Kind: "CustomResourceDefinition",
},
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"operator.kubevirt.io": "",
},
},
}
crd.ObjectMeta.Name = KUBEVIRT
crd.Spec = extv1.CustomResourceDefinitionSpec{
Group: virtv1.KubeVirtGroupVersionKind.Group,
Versions: newCRDVersions(),
Scope: "Namespaced",
Names: extv1.CustomResourceDefinitionNames{
Plural: "kubevirts",
Singular: "kubevirt",
Kind: virtv1.KubeVirtGroupVersionKind.Kind,
ShortNames: []string{"kv", "kvs"},
Categories: []string{
"all",
},
},
}
err := addFieldsToAllVersions(crd, []extv1.CustomResourceColumnDefinition{
{Name: "Age", Type: "date", JSONPath: creationTimestampJSONPath},
{Name: "Phase", Type: "string", JSONPath: phaseJSONPath},
}, &extv1.CustomResourceSubresources{
Status: &extv1.CustomResourceSubresourceStatus{},
})
if err != nil {
return nil, err
}
if err = patchValidationForAllVersions(crd); err != nil {
return nil, err
}
return crd, nil
}
func NewVirtualMachinePoolCrd() (*extv1.CustomResourceDefinition, error) {
crd := newBlankCrd()
labelSelector := ".status.labelSelector"
crd.ObjectMeta.Name = VIRTUALMACHINEPOOL
crd.Spec = extv1.CustomResourceDefinitionSpec{
Group: poolv1beta1.SchemeGroupVersion.Group,
Versions: []extv1.CustomResourceDefinitionVersion{
{
Name: poolv1alpha1.SchemeGroupVersion.Version,
Served: true,
Storage: false,
},
{
Name: poolv1beta1.SchemeGroupVersion.Version,
Served: true,
Storage: true,
},
},
Scope: "Namespaced",
Names: extv1.CustomResourceDefinitionNames{
Plural: "virtualmachinepools",
Singular: "virtualmachinepool",
Kind: "VirtualMachinePool",
ShortNames: []string{"vmpool", "vmpools"},
Categories: []string{
"all",
},
},
}
err := addFieldsToAllVersions(crd,
[]extv1.CustomResourceColumnDefinition{
{Name: "Desired", Type: "integer", JSONPath: ".spec.replicas",
Description: "Number of desired VirtualMachines"},
{Name: "Current", Type: "integer", JSONPath: ".status.replicas",
Description: "Number of managed and not final or deleted VirtualMachines"},
{Name: "Ready", Type: "integer", JSONPath: ".status.readyReplicas",
Description: "Number of managed VirtualMachines which are ready to receive traffic"},
{Name: "Age", Type: "date", JSONPath: creationTimestampJSONPath},
}, &extv1.CustomResourceSubresources{
Scale: &extv1.CustomResourceSubresourceScale{
SpecReplicasPath: ".spec.replicas",
StatusReplicasPath: ".status.replicas",
LabelSelectorPath: &labelSelector,
},
Status: &extv1.CustomResourceSubresourceStatus{},
})
if err != nil {
return nil, err
}
if err := patchValidationForAllVersions(crd); err != nil {
return nil, err
}
return crd, nil
}
func NewVirtualMachineSnapshotCrd() (*extv1.CustomResourceDefinition, error) {
crd := newBlankCrd()
crd.ObjectMeta.Name = VIRTUALMACHINESNAPSHOT
crd.Spec = extv1.CustomResourceDefinitionSpec{
Group: snapshotv1beta1.SchemeGroupVersion.Group,
Versions: []extv1.CustomResourceDefinitionVersion{
{
Name: snapshotv1alpha1.SchemeGroupVersion.Version,
Served: true,
Storage: false,
Subresources: &extv1.CustomResourceSubresources{
Status: &extv1.CustomResourceSubresourceStatus{},
},
},
{
Name: snapshotv1beta1.SchemeGroupVersion.Version,
Served: true,
Storage: true,
Subresources: &extv1.CustomResourceSubresources{
Status: &extv1.CustomResourceSubresourceStatus{},
},
},
},
Scope: "Namespaced",
Conversion: &extv1.CustomResourceConversion{
Strategy: extv1.NoneConverter,
},
Names: extv1.CustomResourceDefinitionNames{
Plural: "virtualmachinesnapshots",
Singular: "virtualmachinesnapshot",
Kind: "VirtualMachineSnapshot",
ShortNames: []string{"vmsnapshot", "vmsnapshots"},
Categories: []string{
"all",
},
},
}
err := addFieldsToAllVersions(crd, []extv1.CustomResourceColumnDefinition{
{Name: "SourceKind", Type: "string", JSONPath: ".spec.source.kind"},
{Name: "SourceName", Type: "string", JSONPath: ".spec.source.name"},
{Name: "Phase", Type: "string", JSONPath: phaseJSONPath},
{Name: "ReadyToUse", Type: "boolean", JSONPath: ".status.readyToUse"},
{Name: "CreationTime", Type: "date", JSONPath: ".status.creationTime"},
{Name: "Error", Type: "string", JSONPath: errorMessageJSONPath},
})
if err != nil {
return nil, err
}
if err = patchValidationForAllVersions(crd); err != nil {
return nil, err
}
return crd, nil
}
func NewVirtualMachineSnapshotContentCrd() (*extv1.CustomResourceDefinition, error) {
crd := newBlankCrd()
crd.ObjectMeta.Name = VIRTUALMACHINESNAPSHOTCONTENT
crd.Spec = extv1.CustomResourceDefinitionSpec{
Group: snapshotv1beta1.SchemeGroupVersion.Group,
Versions: []extv1.CustomResourceDefinitionVersion{
{
Name: snapshotv1alpha1.SchemeGroupVersion.Version,
Served: true,
Storage: false,
Subresources: &extv1.CustomResourceSubresources{
Status: &extv1.CustomResourceSubresourceStatus{},
},
},
{
Name: snapshotv1beta1.SchemeGroupVersion.Version,
Served: true,
Storage: true,
Subresources: &extv1.CustomResourceSubresources{
Status: &extv1.CustomResourceSubresourceStatus{},
},
},
},
Scope: "Namespaced",
Conversion: &extv1.CustomResourceConversion{
Strategy: extv1.NoneConverter,
},
Names: extv1.CustomResourceDefinitionNames{
Plural: "virtualmachinesnapshotcontents",
Singular: "virtualmachinesnapshotcontent",
Kind: "VirtualMachineSnapshotContent",
ShortNames: []string{"vmsnapshotcontent", "vmsnapshotcontents"},
Categories: []string{
"all",
},
},
}
err := addFieldsToAllVersions(crd, []extv1.CustomResourceColumnDefinition{
{Name: "ReadyToUse", Type: "boolean", JSONPath: ".status.readyToUse"},
{Name: "CreationTime", Type: "date", JSONPath: ".status.creationTime"},
{Name: "Error", Type: "string", JSONPath: errorMessageJSONPath},
})
if err != nil {
return nil, err
}
if err = patchValidationForAllVersions(crd); err != nil {
return nil, err
}
return crd, nil
}
func NewVirtualMachineRestoreCrd() (*extv1.CustomResourceDefinition, error) {
crd := newBlankCrd()
crd.ObjectMeta.Name = "virtualmachinerestores." + snapshotv1beta1.SchemeGroupVersion.Group
crd.Spec = extv1.CustomResourceDefinitionSpec{
Group: snapshotv1beta1.SchemeGroupVersion.Group,
Versions: []extv1.CustomResourceDefinitionVersion{
{
Name: snapshotv1alpha1.SchemeGroupVersion.Version,
Served: true,
Storage: false,
Subresources: &extv1.CustomResourceSubresources{
Status: &extv1.CustomResourceSubresourceStatus{},
},
},
{
Name: snapshotv1beta1.SchemeGroupVersion.Version,
Served: true,
Storage: true,
Subresources: &extv1.CustomResourceSubresources{
Status: &extv1.CustomResourceSubresourceStatus{},
},
},
},
Scope: "Namespaced",
Conversion: &extv1.CustomResourceConversion{
Strategy: extv1.NoneConverter,
},
Names: extv1.CustomResourceDefinitionNames{
Plural: "virtualmachinerestores",
Singular: "virtualmachinerestore",
Kind: "VirtualMachineRestore",
ShortNames: []string{"vmrestore", "vmrestores"},
Categories: []string{
"all",
},
},
}
err := addFieldsToAllVersions(crd, []extv1.CustomResourceColumnDefinition{
{Name: "TargetKind", Type: "string", JSONPath: ".spec.target.kind"},
{Name: "TargetName", Type: "string", JSONPath: ".spec.target.name"},
{Name: "Complete", Type: "boolean", JSONPath: ".status.complete"},
{Name: "RestoreTime", Type: "date", JSONPath: ".status.restoreTime"},
})
if err != nil {
return nil, err
}
if err = patchValidationForAllVersions(crd); err != nil {
return nil, err
}
return crd, nil
}
func NewVirtualMachineExportCrd() (*extv1.CustomResourceDefinition, error) {
crd := newBlankCrd()
crd.ObjectMeta.Name = "virtualmachineexports." + exportv1beta1.SchemeGroupVersion.Group
crd.Spec = extv1.CustomResourceDefinitionSpec{
Group: exportv1beta1.SchemeGroupVersion.Group,
Versions: []extv1.CustomResourceDefinitionVersion{
{
Name: exportv1alpha1.SchemeGroupVersion.Version,
Served: true,
Storage: false,
Subresources: &extv1.CustomResourceSubresources{
Status: &extv1.CustomResourceSubresourceStatus{},
},
},
{
Name: exportv1beta1.SchemeGroupVersion.Version,
Served: true,
Storage: true,
Subresources: &extv1.CustomResourceSubresources{
Status: &extv1.CustomResourceSubresourceStatus{},
},
},
},
Scope: "Namespaced",
Conversion: &extv1.CustomResourceConversion{
Strategy: extv1.NoneConverter,
},
Names: extv1.CustomResourceDefinitionNames{
Plural: "virtualmachineexports",
Singular: "virtualmachineexport",
Kind: "VirtualMachineExport",
ShortNames: []string{"vmexport", "vmexports"},
Categories: []string{
"all",
},
},
}
err := addFieldsToAllVersions(crd, []extv1.CustomResourceColumnDefinition{
{Name: "SourceKind", Type: "string", JSONPath: ".spec.source.kind"},
{Name: "SourceName", Type: "string", JSONPath: ".spec.source.name"},
{Name: "Phase", Type: "string", JSONPath: phaseJSONPath},
})
if err != nil {
return nil, err
}
if err = patchValidationForAllVersions(crd); err != nil {
return nil, err
}
return crd, nil
}
func NewVirtualMachineBackupCrd() (*extv1.CustomResourceDefinition, error) {
crd := newBlankCrd()
crd.ObjectMeta.Name = VIRTUALMACHINEBACKUP
crd.Spec = extv1.CustomResourceDefinitionSpec{
Group: backupv1alpha1.SchemeGroupVersion.Group,
Versions: []extv1.CustomResourceDefinitionVersion{
{
Name: backupv1alpha1.SchemeGroupVersion.Version,
Served: true,
Storage: true,
Subresources: &extv1.CustomResourceSubresources{
Status: &extv1.CustomResourceSubresourceStatus{},
},
},
},
Scope: "Namespaced",
Conversion: &extv1.CustomResourceConversion{
Strategy: extv1.NoneConverter,
},
Names: extv1.CustomResourceDefinitionNames{
Plural: "virtualmachinebackups",
Singular: "virtualmachinebackup",
Kind: "VirtualMachineBackup",
ShortNames: []string{"vmbackup", "vmbackups"},
Categories: []string{
"all",
},
},
}
err := addFieldsToAllVersions(crd, []extv1.CustomResourceColumnDefinition{
{Name: "SourceKind", Type: "string", JSONPath: ".spec.source.kind"},
{Name: "SourceName", Type: "string", JSONPath: ".spec.source.name"},
{Name: "Type", Type: "string", JSONPath: ".status.Type"},
{Name: "CheckpointName", Type: "string", JSONPath: ".status.CheckpointName"},
{Name: "CompletionTime", Type: "date", JSONPath: ".status.CompletionTime"},
})
if err != nil {
return nil, err
}
if err = patchValidationForAllVersions(crd); err != nil {
return nil, err
}
return crd, nil
}
func NewVirtualMachineBackupTrackerCrd() (*extv1.CustomResourceDefinition, error) {
crd := newBlankCrd()
crd.ObjectMeta.Name = VIRTUALMACHINEBACKUPTRACKER
crd.Spec = extv1.CustomResourceDefinitionSpec{
Group: backupv1alpha1.SchemeGroupVersion.Group,
Versions: []extv1.CustomResourceDefinitionVersion{
{
Name: backupv1alpha1.SchemeGroupVersion.Version,
Served: true,
Storage: true,
Subresources: &extv1.CustomResourceSubresources{
Status: &extv1.CustomResourceSubresourceStatus{},
},
},
},
Scope: "Namespaced",
Conversion: &extv1.CustomResourceConversion{
Strategy: extv1.NoneConverter,
},
Names: extv1.CustomResourceDefinitionNames{
Plural: "virtualmachinebackuptrackers",
Singular: "virtualmachinebackuptracker",
Kind: "VirtualMachineBackupTracker",
ShortNames: []string{"vmbackuptracker", "vmbackuptrackers"},
Categories: []string{
"all",
},
},
}
err := addFieldsToAllVersions(crd, []extv1.CustomResourceColumnDefinition{
{Name: "SourceKind", Type: "string", JSONPath: ".spec.source.kind"},
{Name: "SourceName", Type: "string", JSONPath: ".spec.source.name"},
{Name: "LatestCheckpoint", Type: "string", JSONPath: ".status.latestCheckpoint.name"},
{Name: "CheckpointTime", Type: "date", JSONPath: ".status.latestCheckpoint.creationTime"},
})
if err != nil {
return nil, err
}
if err = patchValidationForAllVersions(crd); err != nil {
return nil, err
}
return crd, nil
}
func NewVirtualMachineInstancetypeCrd() (*extv1.CustomResourceDefinition, error) {
crd := newBlankCrd()
crd.Name = "virtualmachineinstancetypes." + instancetypev1beta1.SchemeGroupVersion.Group
crd.Spec = extv1.CustomResourceDefinitionSpec{
Group: instancetypev1beta1.SchemeGroupVersion.Group,
Names: extv1.CustomResourceDefinitionNames{
Plural: instancetype.PluralResourceName,
Singular: instancetype.SingularResourceName,
ShortNames: []string{"vminstancetype", "vminstancetypes", "vmf", "vmfs"},
Kind: "VirtualMachineInstancetype",
Categories: []string{"all"},
},
Scope: extv1.NamespaceScoped,
Conversion: &extv1.CustomResourceConversion{
Strategy: extv1.NoneConverter,
},
Versions: []extv1.CustomResourceDefinitionVersion{{
Name: instancetypev1beta1.SchemeGroupVersion.Version,
Served: true,
Storage: true,
}},
}
if err := patchValidationForAllVersions(crd); err != nil {
return nil, err
}
return crd, nil
}
func NewVirtualMachineClusterInstancetypeCrd() (*extv1.CustomResourceDefinition, error) {
crd := newBlankCrd()
crd.Name = "virtualmachineclusterinstancetypes." + instancetypev1beta1.SchemeGroupVersion.Group
crd.Spec = extv1.CustomResourceDefinitionSpec{
Group: instancetypev1beta1.SchemeGroupVersion.Group,
Names: extv1.CustomResourceDefinitionNames{
Plural: instancetype.ClusterPluralResourceName,
Singular: instancetype.ClusterSingularResourceName,
ShortNames: []string{"vmclusterinstancetype", "vmclusterinstancetypes", "vmcf", "vmcfs"},
Kind: "VirtualMachineClusterInstancetype",
},
Scope: extv1.ClusterScoped,
Conversion: &extv1.CustomResourceConversion{
Strategy: extv1.NoneConverter,
},
Versions: []extv1.CustomResourceDefinitionVersion{{
Name: instancetypev1beta1.SchemeGroupVersion.Version,
Served: true,
Storage: true,
}},
}
if err := patchValidationForAllVersions(crd); err != nil {
return nil, err
}
return crd, nil
}
func NewVirtualMachinePreferenceCrd() (*extv1.CustomResourceDefinition, error) {
crd := newBlankCrd()
crd.Name = "virtualmachinepreferences." + instancetypev1beta1.SchemeGroupVersion.Group
crd.Spec = extv1.CustomResourceDefinitionSpec{
Group: instancetypev1beta1.SchemeGroupVersion.Group,
Names: extv1.CustomResourceDefinitionNames{
Plural: instancetype.PluralPreferenceResourceName,
Singular: instancetype.SingularPreferenceResourceName,
ShortNames: []string{"vmpref", "vmprefs", "vmp", "vmps"},
Kind: "VirtualMachinePreference",
Categories: []string{"all"},
},
Scope: extv1.NamespaceScoped,
Conversion: &extv1.CustomResourceConversion{
Strategy: extv1.NoneConverter,
},
Versions: []extv1.CustomResourceDefinitionVersion{{
Name: instancetypev1beta1.SchemeGroupVersion.Version,
Served: true,
Storage: true,
}},
}
if err := patchValidationForAllVersions(crd); err != nil {
return nil, err
}
return crd, nil
}
func NewVirtualMachineClusterPreferenceCrd() (*extv1.CustomResourceDefinition, error) {
crd := newBlankCrd()
crd.Name = "virtualmachineclusterpreferences." + instancetypev1beta1.SchemeGroupVersion.Group
crd.Spec = extv1.CustomResourceDefinitionSpec{
Group: instancetypev1beta1.SchemeGroupVersion.Group,
Names: extv1.CustomResourceDefinitionNames{
Plural: instancetype.ClusterPluralPreferenceResourceName,
Singular: instancetype.ClusterSingularPreferenceResourceName,
ShortNames: []string{"vmcp", "vmcps"},
Kind: "VirtualMachineClusterPreference",
},
Scope: extv1.ClusterScoped,
Conversion: &extv1.CustomResourceConversion{
Strategy: extv1.NoneConverter,
},
Versions: []extv1.CustomResourceDefinitionVersion{{
Name: instancetypev1beta1.SchemeGroupVersion.Version,
Served: true,
Storage: true,
}},
}
if err := patchValidationForAllVersions(crd); err != nil {
return nil, err
}
return crd, nil
}
func NewMigrationPolicyCrd() (*extv1.CustomResourceDefinition, error) {
crd := newBlankCrd()
crd.ObjectMeta.Name = MIGRATIONPOLICY
crd.Spec = extv1.CustomResourceDefinitionSpec{
Group: migrationsv1.MigrationPolicyKind.Group,
Versions: []extv1.CustomResourceDefinitionVersion{
{
Name: migrationsv1.SchemeGroupVersion.Version,
Served: true,
Storage: true,
},
},
Scope: extv1.ClusterScoped,
Names: extv1.CustomResourceDefinitionNames{
Plural: migrations.ResourceMigrationPolicies,
Singular: "migrationpolicy",
Kind: migrationsv1.MigrationPolicyKind.Kind,
},
}
err := addFieldsToAllVersions(crd, &extv1.CustomResourceSubresources{
Status: &extv1.CustomResourceSubresourceStatus{},
})
if err != nil {
return nil, err
}
if err = patchValidationForAllVersions(crd); err != nil {
return nil, err
}
return crd, nil
}
func NewVirtualMachineCloneCrd() (*extv1.CustomResourceDefinition, error) {
crd := newBlankCrd()
crd.ObjectMeta.Name = VIRTUALMACHINECLONE
crd.Spec = extv1.CustomResourceDefinitionSpec{
Group: clonev1alpha1.VirtualMachineCloneKind.Group,
Versions: []extv1.CustomResourceDefinitionVersion{
{
Name: clonev1alpha1.SchemeGroupVersion.Version,
Served: true,
Storage: false,
},
{
Name: clonev1beta1.SchemeGroupVersion.Version,
Served: true,
Storage: true,
},
},
Scope: extv1.NamespaceScoped,
Names: extv1.CustomResourceDefinitionNames{
Plural: clone.ResourceVMClonePlural,
Singular: clone.ResourceVMCloneSingular,
ShortNames: []string{"vmclone", "vmclones"},
Kind: clonev1alpha1.VirtualMachineCloneKind.Kind,
Categories: []string{
"all",
},
},
}
err := addFieldsToAllVersions(crd,
&extv1.CustomResourceSubresources{
Status: &extv1.CustomResourceSubresourceStatus{},
},
[]extv1.CustomResourceColumnDefinition{
{Name: "Phase", Type: "string", JSONPath: phaseJSONPath},
{Name: "SourceVirtualMachine", Type: "string", JSONPath: ".spec.source.name"},
{Name: "TargetVirtualMachine", Type: "string", JSONPath: ".spec.target.name"},
},
)
if err != nil {
return nil, err
}
if err = patchValidationForAllVersions(crd); err != nil {
return nil, err
}
return crd, nil
}
// NewKubeVirtPriorityClassCR is used for manifest generation
func NewKubeVirtPriorityClassCR() *schedulingv1.PriorityClass {
return &schedulingv1.PriorityClass{
TypeMeta: metav1.TypeMeta{
APIVersion: "scheduling.k8s.io/v1",
Kind: "PriorityClass",
},
ObjectMeta: metav1.ObjectMeta{
Name: "kubevirt-cluster-critical",
},
// 1 billion is the highest value we can set
// https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
Value: 1000000000,
GlobalDefault: false,
Description: "This priority class should be used for KubeVirt core components only.",
}
}
package components
import (
"fmt"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
networkv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/pointer"
"kubevirt.io/kubevirt/pkg/storage/reservation"
"kubevirt.io/kubevirt/pkg/util"
operatorutil "kubevirt.io/kubevirt/pkg/virt-operator/util"
)
const (
VirtHandlerName = "virt-handler"
kubeletPodsPath = util.KubeletRoot + "/pods"
runtimesPath = "/var/run/kubevirt-libvirt-runtimes"
PrHelperName = "pr-helper"
prVolumeName = "pr-helper-socket-vol"
devDirVol = "dev-dir"
SidecarShimName = "sidecar-shim"
etcMultipath = "etc-multipath"
SupportsMigrationCNsValidation = "kubevirt.io/supports-migration-cn-types"
)
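// RenderPrHelperContainer builds the privileged persistent reservation helper
// container, with bidirectional mount propagation for the PR socket and multipath
// directories.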
func RenderPrHelperContainer(image string, pullPolicy corev1.PullPolicy) corev1.Container {
bidi := corev1.MountPropagationBidirectional
return corev1.Container{
Name: PrHelperName,
Image: image,
ImagePullPolicy: pullPolicy,
Command: []string{"/entrypoint.sh"},
Args: []string{
"-k", reservation.GetPrHelperSocketPath(),
},
VolumeMounts: []corev1.VolumeMount{
{
Name: prVolumeName,
MountPath: reservation.GetPrHelperSocketDir(),
MountPropagation: &bidi,
},
{
Name: devDirVol,
MountPath: "/dev",
MountPropagation: pointer.P(corev1.MountPropagationHostToContainer),
},
{
Name: etcMultipath,
MountPath: "/etc/multipath",
MountPropagation: &bidi,
},
},
SecurityContext: &corev1.SecurityContext{
RunAsUser: pointer.P(int64(util.RootUser)),
Privileged: pointer.P(true),
},
TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError,
}
}
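// NewHandlerDaemonSet renders the virt-handler DaemonSet, including the node-labeller
// init container, the optional launcher image-holder and pr-helper containers, and the
// host-path volumes virt-handler needs on every node.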
func NewHandlerDaemonSet(namespace, repository, imagePrefix, version, launcherVersion, prHelperVersion, sidecarShimVersion, productName, productVersion, productComponent, image, launcherImage, prHelperImage, sidecarShimImage string, pullPolicy corev1.PullPolicy, imagePullSecrets []corev1.LocalObjectReference, migrationNetwork *string, verbosity string, extraEnv map[string]string, enablePrHelper bool) *appsv1.DaemonSet {
deploymentName := VirtHandlerName
imageName := fmt.Sprintf("%s%s", imagePrefix, deploymentName)
env := operatorutil.NewEnvVarMap(extraEnv)
podTemplateSpec := newPodTemplateSpec(deploymentName, imageName, repository, version, productName, productVersion, productComponent, image, pullPolicy, imagePullSecrets, nil, env)
if launcherImage == "" {
launcherImage = fmt.Sprintf("%s/%s%s%s", repository, imagePrefix, "virt-launcher", AddVersionSeparatorPrefix(launcherVersion))
}
if migrationNetwork != nil {
if podTemplateSpec.ObjectMeta.Annotations == nil {
podTemplateSpec.ObjectMeta.Annotations = make(map[string]string)
}
// Join the pod to the migration network and name the corresponding interface "migration0"
podTemplateSpec.ObjectMeta.Annotations[networkv1.NetworkAttachmentAnnot] = *migrationNetwork + "@" + virtv1.MigrationInterfaceName
}
if podTemplateSpec.Annotations == nil {
podTemplateSpec.Annotations = make(map[string]string)
}
podTemplateSpec.Annotations["openshift.io/required-scc"] = "kubevirt-handler"
daemonset := &appsv1.DaemonSet{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: VirtHandlerName,
Labels: map[string]string{
virtv1.AppLabel: VirtHandlerName,
SupportsMigrationCNsValidation: "true",
},
},
Spec: appsv1.DaemonSetSpec{
UpdateStrategy: appsv1.DaemonSetUpdateStrategy{
Type: appsv1.RollingUpdateDaemonSetStrategyType,
},
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"kubevirt.io": VirtHandlerName,
},
},
Template: *podTemplateSpec,
},
}
if productVersion != "" {
daemonset.ObjectMeta.Labels[virtv1.AppVersionLabel] = productVersion
}
if productName != "" {
daemonset.ObjectMeta.Labels[virtv1.AppPartOfLabel] = productName
}
if productComponent != "" {
daemonset.ObjectMeta.Labels[virtv1.AppComponentLabel] = productComponent
}
pod := &daemonset.Spec.Template.Spec
pod.ServiceAccountName = HandlerServiceAccountName
pod.HostPID = true
// The node-labeller currently only supports x86; the arch check is done in node-labeller.sh.
pod.InitContainers = []corev1.Container{
{
Command: []string{
"/bin/sh",
"-c",
},
Image: launcherImage,
Name: "virt-launcher",
Args: []string{
"node-labeller.sh",
},
SecurityContext: &corev1.SecurityContext{
Privileged: pointer.P(true),
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "node-labeller",
MountPath: nodeLabellerVolumePath,
},
},
TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError,
},
}
// If any image pull secret is added to the `virt-handler` deployment, `virt-handler`
// may be using a private image. In that case we must add a `virt-launcher` container
// that pre-pulls and holds the (probably) custom image of `virt-launcher`.
// Note that we cannot make it an init container because the `virt-launcher`
// image could be garbage collected by the kubelet.
// Note that we cannot add `imagePullSecrets` to `virt-launcher` as this could
// be a security risk - user could use this secret and abuse it.
if len(imagePullSecrets) > 0 {
pod.Containers = append(pod.Containers, corev1.Container{
Name: "virt-launcher-image-holder",
Image: launcherImage,
ImagePullPolicy: corev1.PullIfNotPresent,
Command: []string{"/bin/sh", "-c"},
Args: []string{"sleep infinity"},
Resources: corev1.ResourceRequirements{
Limits: map[corev1.ResourceName]resource.Quantity{
corev1.ResourceCPU: resource.MustParse("100m"),
corev1.ResourceMemory: resource.MustParse("20Mi"),
},
},
TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError,
})
}
// give the handler grace period some padding
// in order to ensure we have a chance to cleanly exit
// before SIG_KILL
podGracePeriod := int64(330)
handlerGracePeriod := podGracePeriod - 15
podTemplateSpec.Spec.TerminationGracePeriodSeconds = &podGracePeriod
container := &pod.Containers[0]
container.Command = []string{
VirtHandlerName,
}
container.Args = []string{
"--port",
"8443",
"--hostname-override",
"$(NODE_NAME)",
"--pod-ip-address",
"$(MY_POD_IP)",
"--max-metric-requests",
"3",
"--console-server-port",
"8186",
"--graceful-shutdown-seconds",
fmt.Sprintf("%d", handlerGracePeriod),
"-v",
verbosity,
}
container.Ports = []corev1.ContainerPort{
{
Name: "metrics",
Protocol: corev1.ProtocolTCP,
ContainerPort: 8443,
},
}
container.SecurityContext = &corev1.SecurityContext{
Privileged: pointer.P(true),
SELinuxOptions: &corev1.SELinuxOptions{
Level: "s0",
},
}
containerEnv := []corev1.EnvVar{
{
Name: "NODE_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "spec.nodeName",
},
},
},
{
Name: "MY_POD_IP",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
}
container.Env = append(container.Env, containerEnv...)
container.LivenessProbe = &corev1.Probe{
FailureThreshold: 3,
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Scheme: corev1.URISchemeHTTPS,
Port: intstr.IntOrString{
Type: intstr.Int,
IntVal: 8443,
},
Path: "/healthz",
},
},
InitialDelaySeconds: 15,
TimeoutSeconds: 10,
PeriodSeconds: 45,
}
container.ReadinessProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Scheme: corev1.URISchemeHTTPS,
Port: intstr.IntOrString{
Type: intstr.Int,
IntVal: 8443,
},
Path: "/healthz",
},
},
InitialDelaySeconds: 15,
TimeoutSeconds: 10,
PeriodSeconds: 20,
}
type volume struct {
name string
path string
mountPath string
mountPropagation *corev1.MountPropagationMode
}
attachCertificateSecret(pod, VirtHandlerCertSecretName, "/etc/virt-handler/clientcertificates")
attachCertificateSecret(pod, VirtHandlerServerCertSecretName, "/etc/virt-handler/servercertificates")
attachCertificateSecret(pod, VirtHandlerMigrationClientCertSecretName, "/etc/virt-handler/migrationservercertificates")
attachCertificateSecret(pod, VirtHandlerVsockClientCertSecretName, "/etc/virt-handler/vsockclientcertificates")
attachProfileVolume(pod)
bidi := corev1.MountPropagationBidirectional
// NOTE: the 'kubelet-pods' volume mount exists because that path holds unix socket files.
// Connections fail when a socket path is longer than 108 characters,
// so the shortened mount path keeps domain socket paths within that limit.
// It's ridiculous to have to account for that, but that's the situation we're in.
volumes := []volume{
{"libvirt-runtimes", runtimesPath, runtimesPath, nil},
{"virt-share-dir", util.VirtShareDir, util.VirtShareDir, &bidi},
{"virt-private-dir", util.VirtPrivateDir, util.VirtPrivateDir, nil},
{"kubelet-pods", kubeletPodsPath, "/pods", nil},
{"kubelet", util.KubeletRoot, util.KubeletRoot, &bidi},
{"node-labeller", nodeLabellerVolumePath, nodeLabellerVolumePath, nil},
}
for _, volume := range volumes {
container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{
Name: volume.name,
MountPath: volume.mountPath,
MountPropagation: volume.mountPropagation,
})
pod.Volumes = append(pod.Volumes, corev1.Volume{
Name: volume.name,
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: volume.path,
},
},
})
}
// Use the downward API to access the network status annotations
// TODO: This is not used anymore, but can't be removed because of https://github.com/kubevirt/kubevirt/issues/10632
// Since CR-based updates use the wrong install strategy, removing this volume and downgrading via CR will try to
// run the previous version of virt-handler without the volume, which will fail and CrashLoop.
// Please remove the volume once the above issue is fixed.
container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{
Name: "podinfo",
MountPath: "/etc/podinfo",
})
pod.Volumes = append(pod.Volumes, corev1.Volume{
Name: "podinfo",
VolumeSource: corev1.VolumeSource{
DownwardAPI: &corev1.DownwardAPIVolumeSource{
Items: []corev1.DownwardAPIVolumeFile{
{
Path: "network-status",
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: `metadata.annotations['k8s.v1.cni.cncf.io/network-status']`,
},
},
},
},
},
})
container.Resources = corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("10m"),
corev1.ResourceMemory: resource.MustParse("325Mi"),
},
}
if prHelperImage == "" {
prHelperImage = fmt.Sprintf("%s/%s%s%s", repository, imagePrefix, PrHelperName, AddVersionSeparatorPrefix(prHelperVersion))
}
if sidecarShimImage == "" {
sidecarShimImage = fmt.Sprintf("%s/%s%s%s", repository, imagePrefix, SidecarShimName, AddVersionSeparatorPrefix(sidecarShimVersion))
}
if enablePrHelper {
directoryOrCreate := corev1.HostPathDirectoryOrCreate
pod.Volumes = append(pod.Volumes, corev1.Volume{
Name: prVolumeName,
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: reservation.GetPrHelperSocketDir(),
Type: &directoryOrCreate,
},
}}, corev1.Volume{
Name: devDirVol,
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/dev",
},
}}, corev1.Volume{
Name: etcMultipath,
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/etc/multipath",
Type: pointer.P(corev1.HostPathDirectoryOrCreate),
},
}})
pod.Containers = append(pod.Containers, RenderPrHelperContainer(prHelperImage, pullPolicy))
}
return daemonset
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package components
import (
"fmt"
"path"
"strings"
networkv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
"k8s.io/apimachinery/pkg/api/resource"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/pointer"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/placement"
operatorutil "kubevirt.io/kubevirt/pkg/virt-operator/util"
)
const (
nodeLabellerVolumePath = "/var/lib/kubevirt-node-labeller"
VirtAPIName = "virt-api"
VirtControllerName = "virt-controller"
VirtOperatorName = "virt-operator"
VirtExportProxyName = "virt-exportproxy"
VirtSynchronizationControllerName = "virt-synchronization-controller"
kubevirtLabelKey = "kubevirt.io"
portName = "--port"
)
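// NewPrometheusService returns the headless service that exposes the "metrics"
// port of every KubeVirt component labelled for Prometheus scraping.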
func NewPrometheusService(namespace string) *corev1.Service {
return &corev1.Service{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Service",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: "kubevirt-prometheus-metrics",
Labels: map[string]string{
virtv1.AppLabel: "",
prometheusLabelKey: prometheusLabelValue,
},
},
Spec: corev1.ServiceSpec{
Selector: map[string]string{
prometheusLabelKey: prometheusLabelValue,
},
Ports: []corev1.ServicePort{
{
Name: "metrics",
Port: 443,
TargetPort: intstr.IntOrString{
Type: intstr.String,
StrVal: "metrics",
},
Protocol: corev1.ProtocolTCP,
},
},
Type: corev1.ServiceTypeClusterIP,
ClusterIP: corev1.ClusterIPNone,
},
}
}
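// NewApiServerService returns the ClusterIP service fronting virt-api,
// forwarding port 443 to the container's 8443.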
func NewApiServerService(namespace string) *corev1.Service {
return &corev1.Service{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Service",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: VirtAPIName,
Labels: map[string]string{
virtv1.AppLabel: VirtAPIName,
},
},
Spec: corev1.ServiceSpec{
Selector: map[string]string{
virtv1.AppLabel: VirtAPIName,
},
Ports: []corev1.ServicePort{
{
Port: 443,
TargetPort: intstr.IntOrString{
Type: intstr.Int,
IntVal: 8443,
},
Protocol: corev1.ProtocolTCP,
},
},
Type: corev1.ServiceTypeClusterIP,
},
}
}
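// NewExportProxyService returns the ClusterIP service fronting virt-exportproxy,
// forwarding port 443 to the container's 8443.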
func NewExportProxyService(namespace string) *corev1.Service {
return &corev1.Service{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Service",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: VirtExportProxyName,
Labels: map[string]string{
virtv1.AppLabel: VirtExportProxyName,
},
},
Spec: corev1.ServiceSpec{
Selector: map[string]string{
virtv1.AppLabel: VirtExportProxyName,
},
Ports: []corev1.ServicePort{
{
Port: 443,
TargetPort: intstr.IntOrString{
Type: intstr.Int,
IntVal: 8443,
},
Protocol: corev1.ProtocolTCP,
},
},
Type: corev1.ServiceTypeClusterIP,
},
}
}
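// newPodTemplateSpec builds the pod template shared by the control-plane
// deployments: cluster-critical priority, the critical-addons toleration, a
// single container, optional product labels and environment variables. An empty
// image argument falls back to repository/imageName:version.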
func newPodTemplateSpec(podName, imageName, repository, version, productName, productVersion, productComponent, image string, pullPolicy corev1.PullPolicy, imagePullSecrets []corev1.LocalObjectReference, podAffinity *corev1.Affinity, envVars *[]corev1.EnvVar) *corev1.PodTemplateSpec {
if image == "" {
image = fmt.Sprintf("%s/%s%s", repository, imageName, AddVersionSeparatorPrefix(version))
}
podTemplateSpec := &corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
virtv1.AppLabel: podName,
prometheusLabelKey: prometheusLabelValue,
virtv1.AllowAccessClusterServicesNPLabel: "true",
},
Name: podName,
},
Spec: corev1.PodSpec{
PriorityClassName: "kubevirt-cluster-critical",
Affinity: podAffinity,
Tolerations: criticalAddonsToleration(),
Containers: []corev1.Container{
{
Name: podName,
Image: image,
ImagePullPolicy: pullPolicy,
TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError,
},
},
},
}
if len(imagePullSecrets) > 0 {
podTemplateSpec.Spec.ImagePullSecrets = imagePullSecrets
}
if productVersion != "" {
podTemplateSpec.ObjectMeta.Labels[virtv1.AppVersionLabel] = productVersion
}
if productName != "" {
podTemplateSpec.ObjectMeta.Labels[virtv1.AppPartOfLabel] = productName
}
if productComponent != "" {
podTemplateSpec.ObjectMeta.Labels[virtv1.AppComponentLabel] = productComponent
}
if envVars != nil && len(*envVars) != 0 {
podTemplateSpec.Spec.Containers[0].Env = *envVars
}
return podTemplateSpec
}
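// attachProfileVolume mounts an emptyDir at /profile-data on the first
// container, providing scratch space for profiling output.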
func attachProfileVolume(spec *corev1.PodSpec) {
volume := corev1.Volume{
Name: "profile-data",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
}
volumeMount := corev1.VolumeMount{
Name: "profile-data",
MountPath: "/profile-data",
}
spec.Volumes = append(spec.Volumes, volume)
spec.Containers[0].VolumeMounts = append(spec.Containers[0].VolumeMounts, volumeMount)
}
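// attachCertificateSecret mounts the named secret read-only at mountPath on the
// first container. The secret is marked optional, presumably so the pod can be
// scheduled before the certificate secret has been populated.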
func attachCertificateSecret(spec *corev1.PodSpec, secretName string, mountPath string) {
secretVolume := corev1.Volume{
Name: secretName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretName,
Optional: pointer.P(true),
},
},
}
secretVolumeMount := corev1.VolumeMount{
Name: secretName,
ReadOnly: true,
MountPath: mountPath,
}
spec.Volumes = append(spec.Volumes, secretVolume)
spec.Containers[0].VolumeMounts = append(spec.Containers[0].VolumeMounts, secretVolumeMount)
}
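// newBaseDeployment wraps newPodTemplateSpec in a two-replica Deployment whose
// selector matches the pod template's "kubevirt.io" label.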
func newBaseDeployment(deploymentName, imageName, namespace, repository, version, productName, productVersion, productComponent, image string, pullPolicy corev1.PullPolicy, imagePullSecrets []corev1.LocalObjectReference, podAffinity *corev1.Affinity, envVars *[]corev1.EnvVar) *appsv1.Deployment {
podTemplateSpec := newPodTemplateSpec(deploymentName, imageName, repository, version, productName, productVersion, productComponent, image, pullPolicy, imagePullSecrets, podAffinity, envVars)
deployment := &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: deploymentName,
Labels: map[string]string{
virtv1.AppLabel: deploymentName,
virtv1.AppNameLabel: deploymentName,
},
},
Spec: appsv1.DeploymentSpec{
Replicas: pointer.P(int32(2)),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
kubevirtLabelKey: deploymentName,
},
},
Template: *podTemplateSpec,
},
}
if productVersion != "" {
deployment.ObjectMeta.Labels[virtv1.AppVersionLabel] = productVersion
}
if productName != "" {
deployment.ObjectMeta.Labels[virtv1.AppPartOfLabel] = productName
}
if productComponent != "" {
deployment.ObjectMeta.Labels[virtv1.AppComponentLabel] = productComponent
}
return deployment
}
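// newPodAntiAffinity returns a preferred (weight 1) anti-affinity term that
// spreads pods matching the given label requirement across topologyKey domains,
// typically one pod per node.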
func newPodAntiAffinity(key, topologyKey string, operator metav1.LabelSelectorOperator, values []string) *corev1.Affinity {
return &corev1.Affinity{
PodAntiAffinity: &corev1.PodAntiAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
{
Weight: 1,
PodAffinityTerm: corev1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: key,
Operator: operator,
Values: values,
},
},
},
TopologyKey: topologyKey,
},
},
},
},
}
}
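// NewApiServerDeployment renders the virt-api Deployment: a non-root,
// seccomp-confined pod with its serving and virt-handler client certificates
// mounted, probed for readiness on the subresource healthz endpoint.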
func NewApiServerDeployment(namespace, repository, imagePrefix, version, productName, productVersion, productComponent, image string, pullPolicy corev1.PullPolicy, imagePullSecrets []corev1.LocalObjectReference, verbosity string, extraEnv map[string]string) *appsv1.Deployment {
podAntiAffinity := newPodAntiAffinity(kubevirtLabelKey, corev1.LabelHostname, metav1.LabelSelectorOpIn, []string{VirtAPIName})
deploymentName := VirtAPIName
imageName := fmt.Sprintf("%s%s", imagePrefix, deploymentName)
env := operatorutil.NewEnvVarMap(extraEnv)
deployment := newBaseDeployment(deploymentName, imageName, namespace, repository, version, productName, productVersion, productComponent, image, pullPolicy, imagePullSecrets, podAntiAffinity, env)
if deployment.Spec.Template.Annotations == nil {
deployment.Spec.Template.Annotations = make(map[string]string)
}
deployment.Spec.Template.Annotations["openshift.io/required-scc"] = "restricted-v2"
attachCertificateSecret(&deployment.Spec.Template.Spec, VirtApiCertSecretName, "/etc/virt-api/certificates")
attachCertificateSecret(&deployment.Spec.Template.Spec, VirtHandlerCertSecretName, "/etc/virt-handler/clientcertificates")
attachProfileVolume(&deployment.Spec.Template.Spec)
pod := &deployment.Spec.Template.Spec
pod.ServiceAccountName = ApiServiceAccountName
pod.SecurityContext = &corev1.PodSecurityContext{
RunAsNonRoot: pointer.P(true),
SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
}
container := &deployment.Spec.Template.Spec.Containers[0]
container.Command = []string{
VirtAPIName,
}
container.Args = []string{
portName,
"8443",
"--console-server-port",
"8186",
"--subresources-only",
"-v",
verbosity,
}
container.Ports = []corev1.ContainerPort{
{
Name: VirtAPIName,
Protocol: corev1.ProtocolTCP,
ContainerPort: 8443,
},
{
Name: "metrics",
Protocol: corev1.ProtocolTCP,
ContainerPort: 8443,
},
}
container.ReadinessProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Scheme: corev1.URISchemeHTTPS,
Port: intstr.IntOrString{
Type: intstr.Int,
IntVal: 8443,
},
Path: path.Join("/apis/subresources.kubevirt.io", virtv1.SubresourceGroupVersions[0].Version, "healthz"),
},
},
InitialDelaySeconds: 15,
PeriodSeconds: 10,
}
container.Resources = corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("5m"),
corev1.ResourceMemory: resource.MustParse("500Mi"),
},
}
container.SecurityContext = &corev1.SecurityContext{
AllowPrivilegeEscalation: pointer.P(false),
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{"ALL"},
},
SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
}
return deployment
}
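// NewControllerDeployment renders the virt-controller Deployment. Besides the
// controller image itself it wires in the launcher, export-server and
// sidecar-shim images, deriving each from the repository and version when no
// explicit image is given.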
func NewControllerDeployment(namespace, repository, imagePrefix, controllerVersion, launcherVersion, exportServerVersion, sidecarVersion, productName, productVersion, productComponent, image, launcherImage, exporterImage, sidecarImage string, pullPolicy corev1.PullPolicy, imagePullSecrets []corev1.LocalObjectReference, verbosity string, extraEnv map[string]string) *appsv1.Deployment {
podAntiAffinity := newPodAntiAffinity(kubevirtLabelKey, corev1.LabelHostname, metav1.LabelSelectorOpIn, []string{VirtControllerName})
deploymentName := VirtControllerName
imageName := fmt.Sprintf("%s%s", imagePrefix, deploymentName)
env := operatorutil.NewEnvVarMap(extraEnv)
deployment := newBaseDeployment(deploymentName, imageName, namespace, repository, controllerVersion, productName, productVersion, productComponent, image, pullPolicy, imagePullSecrets, podAntiAffinity, env)
if deployment.Spec.Template.Annotations == nil {
deployment.Spec.Template.Annotations = make(map[string]string)
}
deployment.Spec.Template.Annotations["openshift.io/required-scc"] = "restricted-v2"
if launcherImage == "" {
launcherImage = fmt.Sprintf("%s/%s%s%s", repository, imagePrefix, "virt-launcher", AddVersionSeparatorPrefix(launcherVersion))
}
if exporterImage == "" {
exporterImage = fmt.Sprintf("%s/%s%s%s", repository, imagePrefix, "virt-exportserver", AddVersionSeparatorPrefix(exportServerVersion))
}
pod := &deployment.Spec.Template.Spec
pod.ServiceAccountName = ControllerServiceAccountName
pod.SecurityContext = &corev1.PodSecurityContext{
RunAsNonRoot: pointer.P(true),
SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
}
container := &deployment.Spec.Template.Spec.Containers[0]
container.Command = []string{
VirtControllerName,
}
container.Args = []string{
"--launcher-image",
launcherImage,
"--exporter-image",
exporterImage,
portName,
"8443",
"-v",
verbosity,
}
container.Ports = []corev1.ContainerPort{
{
Name: "metrics",
Protocol: corev1.ProtocolTCP,
ContainerPort: 8443,
},
}
container.LivenessProbe = &corev1.Probe{
FailureThreshold: 8,
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Scheme: corev1.URISchemeHTTPS,
Port: intstr.IntOrString{
Type: intstr.Int,
IntVal: 8443,
},
Path: "/healthz",
},
},
InitialDelaySeconds: 15,
TimeoutSeconds: 10,
}
container.ReadinessProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Scheme: corev1.URISchemeHTTPS,
Port: intstr.IntOrString{
Type: intstr.Int,
IntVal: 8443,
},
Path: "/leader",
},
},
InitialDelaySeconds: 15,
TimeoutSeconds: 10,
}
attachCertificateSecret(pod, VirtControllerCertSecretName, "/etc/virt-controller/certificates")
attachCertificateSecret(pod, KubeVirtExportCASecretName, "/etc/virt-controller/exportca")
attachProfileVolume(pod)
container.Resources = corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("10m"),
corev1.ResourceMemory: resource.MustParse("275Mi"),
},
}
container.SecurityContext = &corev1.SecurityContext{
AllowPrivilegeEscalation: pointer.P(false),
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{"ALL"},
},
SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
}
if sidecarImage == "" {
sidecarImage = fmt.Sprintf("%s/%s%s%s", repository, imagePrefix, "sidecar-shim", AddVersionSeparatorPrefix(sidecarVersion))
}
container.Env = append(container.Env, corev1.EnvVar{Name: operatorutil.SidecarShimImageEnvName, Value: sidecarImage})
return deployment
}
// NewOperatorDeployment renders the virt-operator Deployment. Used for manifest generation only.
func NewOperatorDeployment(namespace, repository, imagePrefix, version, verbosity, kubeVirtVersionEnv, runbookURLTemplate, virtApiImageEnv, virtControllerImageEnv, virtHandlerImageEnv, virtLauncherImageEnv, virtExportProxyImageEnv, virtExportServerImageEnv, virtSynchronizationControllerImageEnv, gsImage, prHelperImage, sidecarShimImage,
image string, pullPolicy corev1.PullPolicy) *appsv1.Deployment {
const kubernetesOSLinux = "linux"
podAntiAffinity := newPodAntiAffinity(kubevirtLabelKey, corev1.LabelHostname, metav1.LabelSelectorOpIn, []string{VirtOperatorName})
version = AddVersionSeparatorPrefix(version)
if image == "" {
image = fmt.Sprintf("%s/%s%s%s", repository, imagePrefix, VirtOperatorName, version)
}
deployment := &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: VirtOperatorName,
Labels: map[string]string{
virtv1.AppLabel: VirtOperatorName,
},
},
Spec: appsv1.DeploymentSpec{
Replicas: pointer.P(int32(2)),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
virtv1.AppLabel: VirtOperatorName,
},
},
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
virtv1.AppLabel: VirtOperatorName,
virtv1.AppNameLabel: VirtOperatorName,
prometheusLabelKey: prometheusLabelValue,
virtv1.AllowAccessClusterServicesNPLabel: "true",
},
Name: VirtOperatorName,
},
Spec: corev1.PodSpec{
PriorityClassName: "kubevirt-cluster-critical",
Tolerations: criticalAddonsToleration(),
Affinity: podAntiAffinity,
ServiceAccountName: "kubevirt-operator",
NodeSelector: map[string]string{
corev1.LabelOSStable: kubernetesOSLinux,
},
Containers: []corev1.Container{
{
Name: VirtOperatorName,
Image: image,
ImagePullPolicy: pullPolicy,
Command: []string{
VirtOperatorName,
},
Args: []string{
portName,
"8443",
"-v",
verbosity,
},
Ports: []corev1.ContainerPort{
{
Name: "metrics",
Protocol: corev1.ProtocolTCP,
ContainerPort: 8443,
},
{
Name: "webhooks",
Protocol: corev1.ProtocolTCP,
ContainerPort: 8444,
},
},
LivenessProbe: &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Scheme: corev1.URISchemeHTTPS,
Port: intstr.IntOrString{
Type: intstr.Int,
IntVal: 8443,
},
Path: "/metrics",
},
},
InitialDelaySeconds: 5,
TimeoutSeconds: 10,
},
ReadinessProbe: &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Scheme: corev1.URISchemeHTTPS,
Port: intstr.IntOrString{
Type: intstr.Int,
IntVal: 8443,
},
Path: "/metrics",
},
},
InitialDelaySeconds: 5,
TimeoutSeconds: 10,
},
Env: []corev1.EnvVar{
{
Name: operatorutil.VirtOperatorImageEnvName,
Value: image,
},
{
Name: "WATCH_NAMESPACE", // not used yet
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.annotations['olm.targetNamespaces']", // filled by OLM
},
},
},
},
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("10m"),
corev1.ResourceMemory: resource.MustParse("450Mi"),
},
},
SecurityContext: &corev1.SecurityContext{
AllowPrivilegeEscalation: pointer.P(false),
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{"ALL"},
},
SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
},
TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError,
},
},
SecurityContext: &corev1.PodSecurityContext{
RunAsNonRoot: pointer.P(true),
SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
},
},
},
},
}
if deployment.Spec.Template.Annotations == nil {
deployment.Spec.Template.Annotations = make(map[string]string)
}
deployment.Spec.Template.Annotations["openshift.io/required-scc"] = "restricted-v2"
envVars := generateVirtOperatorEnvVars(
runbookURLTemplate, virtApiImageEnv, virtControllerImageEnv, virtHandlerImageEnv, virtLauncherImageEnv, virtExportProxyImageEnv,
virtExportServerImageEnv, virtSynchronizationControllerImageEnv, gsImage, prHelperImage, sidecarShimImage, kubeVirtVersionEnv,
)
if envVars != nil {
deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, envVars...)
}
attachCertificateSecret(&deployment.Spec.Template.Spec, VirtOperatorCertSecretName, "/etc/virt-operator/certificates")
attachProfileVolume(&deployment.Spec.Template.Spec)
placement.InjectPlacementMetadata(nil, &deployment.Spec.Template.Spec, placement.RequireControlPlanePreferNonWorker)
return deployment
}
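// NewExportProxyDeployment renders the virt-exportproxy Deployment, serving
// HTTPS on 8443 with its certificate secret mounted.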
func NewExportProxyDeployment(namespace, repository, imagePrefix, version, productName, productVersion, productComponent, image string, pullPolicy corev1.PullPolicy, imagePullSecrets []corev1.LocalObjectReference, verbosity string, extraEnv map[string]string) *appsv1.Deployment {
podAntiAffinity := newPodAntiAffinity(kubevirtLabelKey, corev1.LabelHostname, metav1.LabelSelectorOpIn, []string{VirtAPIName})
deploymentName := VirtExportProxyName
imageName := fmt.Sprintf("%s%s", imagePrefix, deploymentName)
env := operatorutil.NewEnvVarMap(extraEnv)
deployment := newBaseDeployment(deploymentName, imageName, namespace, repository, version, productName, productVersion, productComponent, image, pullPolicy, imagePullSecrets, podAntiAffinity, env)
if deployment.Spec.Template.Annotations == nil {
deployment.Spec.Template.Annotations = make(map[string]string)
}
deployment.Spec.Template.Annotations["openshift.io/required-scc"] = "restricted-v2"
attachCertificateSecret(&deployment.Spec.Template.Spec, VirtExportProxyCertSecretName, "/etc/virt-exportproxy/certificates")
attachProfileVolume(&deployment.Spec.Template.Spec)
pod := &deployment.Spec.Template.Spec
pod.ServiceAccountName = ExportProxyServiceAccountName
pod.SecurityContext = &corev1.PodSecurityContext{
RunAsNonRoot: pointer.P(true),
}
const shortName = "exportproxy"
container := &deployment.Spec.Template.Spec.Containers[0]
// "virt-exportproxy" exceeds the 15-character limit on port names, so a shortened name is used for the container and its port
container.Name = shortName
container.Command = []string{
VirtExportProxyName,
portName,
"8443",
"-v",
verbosity,
}
container.Ports = []corev1.ContainerPort{
{
Name: shortName,
Protocol: corev1.ProtocolTCP,
ContainerPort: 8443,
},
{
Name: "metrics",
Protocol: corev1.ProtocolTCP,
ContainerPort: 8443,
},
}
container.ReadinessProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Scheme: corev1.URISchemeHTTPS,
Port: intstr.IntOrString{
Type: intstr.Int,
IntVal: 8443,
},
Path: "/healthz",
},
},
InitialDelaySeconds: 15,
PeriodSeconds: 10,
}
container.Resources = corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("5m"),
corev1.ResourceMemory: resource.MustParse("150Mi"),
},
}
return deployment
}
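// NewSynchronizationControllerDeployment renders the synchronization-controller
// Deployment, optionally joining it to a dedicated migration network and
// exposing the synchronization endpoint on syncPort.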
func NewSynchronizationControllerDeployment(
namespace,
repository,
imagePrefix,
version,
productName,
productVersion,
productComponent,
image string,
pullPolicy corev1.PullPolicy,
imagePullSecrets []corev1.LocalObjectReference,
migrationNetwork *string,
syncPort int32,
verbosity string,
extraEnv map[string]string) *appsv1.Deployment {
podAntiAffinity := newPodAntiAffinity(kubevirtLabelKey, corev1.LabelHostname, metav1.LabelSelectorOpIn, []string{VirtSynchronizationControllerName})
deploymentName := VirtSynchronizationControllerName
imageName := fmt.Sprintf("%s%s", imagePrefix, deploymentName)
env := operatorutil.NewEnvVarMap(extraEnv)
*env = append(*env, corev1.EnvVar{
Name: "MY_POD_IP",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
})
deployment := newBaseDeployment(deploymentName, imageName, namespace, repository, version, productName, productVersion, productComponent, image, pullPolicy, imagePullSecrets, podAntiAffinity, env)
if deployment.Spec.Template.Annotations == nil {
deployment.Spec.Template.Annotations = make(map[string]string)
}
// Remove the prometheus label so Prometheus does not try to scrape the synchronization controller.
delete(deployment.Spec.Template.Labels, prometheusLabelKey)
deployment.Spec.Template.Annotations["openshift.io/required-scc"] = "restricted-v2"
if migrationNetwork != nil {
// Join the pod to the migration network and name the corresponding interface "migration0"
deployment.Spec.Template.ObjectMeta.Annotations[networkv1.NetworkAttachmentAnnot] = *migrationNetwork + "@" + virtv1.MigrationInterfaceName
}
attachCertificateSecret(&deployment.Spec.Template.Spec, VirtSynchronizationControllerCertSecretName, "/etc/virt-sync-controller/clientcertificates")
attachCertificateSecret(&deployment.Spec.Template.Spec, VirtSynchronizationControllerServerCertSecretName, "/etc/virt-sync-controller/servercertificates")
attachProfileVolume(&deployment.Spec.Template.Spec)
pod := &deployment.Spec.Template.Spec
pod.ServiceAccountName = SynchronizationControllerServiceAccountName
pod.SecurityContext = &corev1.PodSecurityContext{
RunAsNonRoot: pointer.P(true),
}
const shortName = "sync"
container := &deployment.Spec.Template.Spec.Containers[0]
// "virt-synchronization-controller" exceeds the 15-character limit on port names, so a shortened name is used
container.Name = shortName
container.Command = []string{
VirtSynchronizationControllerName,
"--v",
verbosity,
"--port",
fmt.Sprintf("%d", syncPort),
}
container.Ports = []corev1.ContainerPort{
{
Name: "metrics",
Protocol: corev1.ProtocolTCP,
ContainerPort: 8443,
},
{
Name: shortName,
Protocol: corev1.ProtocolTCP,
ContainerPort: syncPort,
},
}
container.ReadinessProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Scheme: corev1.URISchemeHTTPS,
Port: intstr.IntOrString{
Type: intstr.Int,
IntVal: 8443,
},
Path: "/healthz",
},
},
InitialDelaySeconds: 15,
PeriodSeconds: 10,
}
container.Resources = corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("5m"),
corev1.ResourceMemory: resource.MustParse("150Mi"),
},
}
return deployment
}
func criticalAddonsToleration() []corev1.Toleration {
return []corev1.Toleration{
{
Key: "CriticalAddonsOnly",
Operator: corev1.TolerationOpExists,
},
}
}
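// AddVersionSeparatorPrefix prefixes plain tags with the image/tag separator,
// e.g. AddVersionSeparatorPrefix("v1.2.0") yields ":v1.2.0"; version templates
// (strings starting with "{{if") pass through unchanged.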
func AddVersionSeparatorPrefix(version string) string {
// version can be either a Go template snippet or a plain tag:
// plain tags are prefixed with ":", while templates must carry
// the correct image/version separator themselves
if !strings.HasPrefix(version, "{{if") {
version = fmt.Sprintf(":%s", version)
}
return version
}
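// NewPodDisruptionBudgetForDeployment returns a PDB named <deployment>-pdb that
// keeps replicas-1 pods available (or 1 when replicas is unset), so at most one
// pod of the deployment can be disrupted voluntarily at a time.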
func NewPodDisruptionBudgetForDeployment(deployment *appsv1.Deployment) *policyv1.PodDisruptionBudget {
pdbName := deployment.Name + "-pdb"
minAvailable := intstr.FromInt(1)
if deployment.Spec.Replicas != nil {
minAvailable = intstr.FromInt(int(*deployment.Spec.Replicas - 1))
}
selector := deployment.Spec.Selector.DeepCopy()
podDisruptionBudget := &policyv1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Namespace: deployment.Namespace,
Name: pdbName,
Labels: map[string]string{
virtv1.AppLabel: pdbName,
},
},
Spec: policyv1.PodDisruptionBudgetSpec{
MinAvailable: &minAvailable,
Selector: selector,
},
}
return podDisruptionBudget
}
func generateVirtOperatorEnvVars(runbookURLTemplate, virtApiImageEnv, virtControllerImageEnv, virtHandlerImageEnv, virtLauncherImageEnv, virtExportProxyImageEnv,
virtExportServerImageEnv, virtSynchronizationControllerImageEnv, gsImage, prHelperImage, sidecarShimImage, kubeVirtVersionEnv string) (envVars []corev1.EnvVar) {
addEnvVar := func(envVarName, envVarValue string) {
envVars = append(envVars, corev1.EnvVar{
Name: envVarName,
Value: envVarValue,
})
}
if virtApiImageEnv != "" {
addEnvVar(operatorutil.VirtApiImageEnvName, virtApiImageEnv)
}
if virtControllerImageEnv != "" {
addEnvVar(operatorutil.VirtControllerImageEnvName, virtControllerImageEnv)
}
if virtHandlerImageEnv != "" {
addEnvVar(operatorutil.VirtHandlerImageEnvName, virtHandlerImageEnv)
}
if virtLauncherImageEnv != "" {
addEnvVar(operatorutil.VirtLauncherImageEnvName, virtLauncherImageEnv)
}
if virtExportProxyImageEnv != "" {
addEnvVar(operatorutil.VirtExportProxyImageEnvName, virtExportProxyImageEnv)
}
if virtExportServerImageEnv != "" {
addEnvVar(operatorutil.VirtExportServerImageEnvName, virtExportServerImageEnv)
}
if virtSynchronizationControllerImageEnv != "" {
addEnvVar(operatorutil.VirtSynchronizationControllerImageEnvName, virtSynchronizationControllerImageEnv)
}
if gsImage != "" {
addEnvVar(operatorutil.GsImageEnvName, gsImage)
}
if runbookURLTemplate != "" {
addEnvVar(operatorutil.RunbookURLTemplate, runbookURLTemplate)
}
if prHelperImage != "" {
addEnvVar(operatorutil.PrHelperImageEnvName, prHelperImage)
}
if sidecarShimImage != "" {
addEnvVar(operatorutil.SidecarShimImageEnvName, sidecarShimImage)
}
if kubeVirtVersionEnv != "" {
addEnvVar(operatorutil.KubeVirtVersionEnvName, kubeVirtVersionEnv)
}
return envVars
}
package components
import (
"bytes"
_ "embed"
"io"
"k8s.io/apimachinery/pkg/util/yaml"
instancetypev1beta1 "kubevirt.io/api/instancetype/v1beta1"
)
//go:embed data/common-clusterinstancetypes-bundle.yaml
var clusterInstancetypesBundle []byte
//go:embed data/common-clusterpreferences-bundle.yaml
var clusterPreferencesBundle []byte
func NewClusterInstancetypes() ([]*instancetypev1beta1.VirtualMachineClusterInstancetype, error) {
return decodeResources[instancetypev1beta1.VirtualMachineClusterInstancetype](clusterInstancetypesBundle)
}
func NewClusterPreferences() ([]*instancetypev1beta1.VirtualMachineClusterPreference, error) {
return decodeResources[instancetypev1beta1.VirtualMachineClusterPreference](clusterPreferencesBundle)
}
type clusterType interface {
instancetypev1beta1.VirtualMachineClusterInstancetype | instancetypev1beta1.VirtualMachineClusterPreference
}
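// decodeResources decodes a multi-document YAML (or JSON) bundle into a slice of
// cluster-scoped resources, stopping cleanly at io.EOF. A minimal usage sketch,
// assuming the embedded bundles are well-formed:
//
//	instancetypes, err := NewClusterInstancetypes()
//	if err != nil {
//		// handle a malformed bundle
//	}
//	for _, instancetype := range instancetypes {
//		fmt.Println(instancetype.Name)
//	}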
func decodeResources[C clusterType](b []byte) ([]*C, error) {
decoder := yaml.NewYAMLOrJSONDecoder(bytes.NewReader(b), 1024)
var bundle []*C
for {
bundleResource := new(C)
err := decoder.Decode(bundleResource)
if err == io.EOF {
return bundle, nil
}
if err != nil {
return nil, err
}
bundle = append(bundle, bundleResource)
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package components
import (
k8sv1 "k8s.io/api/core/v1"
networkv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"kubevirt.io/kubevirt/pkg/pointer"
)
const (
allowIngressToMetrics = "kubevirt-allow-ingress-to-metrics"
allowIngressToVirtApiWebhook = "kubevirt-allow-ingress-to-virt-api-webhook-server"
allowVirtApiToComponents = "kubevirt-allow-virt-api-to-components"
allowVirtApiToLaunchers = "kubevirt-allow-virt-api-to-launchers"
allowVirtApiToHandlers = "kubevirt-allow-virt-api-to-virt-handler"
allowIngressToHandler = "kubevirt-allow-ingress-to-virt-handler"
allowIngressToVirtOperatorWebhook = "kubevirt-allow-ingress-to-virt-operator-webhook-server"
allowExportProxyCommunications = "kubevirt-allow-virt-exportproxy-communications"
allowHandlerToHandler = "kubevirt-allow-handler-to-handler"
allowHandlerToPrometheus = "kubevirt-allow-handler-to-prometheus"
)
// NewKubeVirtNetworkPolicies returns the network policies required by KubeVirt to operate
func NewKubeVirtNetworkPolicies(namespace string) []*networkv1.NetworkPolicy {
return []*networkv1.NetworkPolicy{
newIngressToMetricsNP(namespace),
newVirtApiWebhookNP(namespace),
newVirtApiToComponentsNP(namespace),
newVirtApiToLaunchersNP(namespace),
newVirtApiToHandlersNP(namespace),
newHandlersToVirtApiNP(namespace),
newVirtOperatorWebhookNP(namespace),
newExportProxyNP(namespace),
newHandlerToHandlerNP(namespace),
newHandlerToPrometheusNP(namespace),
}
}
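// newNetworkPolicy stamps out a NetworkPolicy with the common type and object
// metadata, leaving the spec entirely to the caller.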
func newNetworkPolicy(namespace, name string, spec *networkv1.NetworkPolicySpec) *networkv1.NetworkPolicy {
return &networkv1.NetworkPolicy{
TypeMeta: metav1.TypeMeta{
APIVersion: "networking.k8s.io/v1",
Kind: "NetworkPolicy",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: *spec,
}
}
func newIngressToMetricsNP(namespace string) *networkv1.NetworkPolicy {
return newNetworkPolicy(
namespace,
allowIngressToMetrics,
&networkv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: kubevirtLabelKey,
Operator: metav1.LabelSelectorOpIn,
Values: []string{
VirtOperatorName,
VirtHandlerName,
VirtControllerName,
VirtAPIName,
VirtExportProxyName,
VirtSynchronizationControllerName,
},
},
},
},
PolicyTypes: []networkv1.PolicyType{networkv1.PolicyTypeIngress},
Ingress: []networkv1.NetworkPolicyIngressRule{
{
Ports: []networkv1.NetworkPolicyPort{
{
Port: pointer.P(intstr.FromInt32(8443)),
Protocol: pointer.P(k8sv1.ProtocolTCP),
},
},
},
},
},
)
}
func newVirtApiWebhookNP(namespace string) *networkv1.NetworkPolicy {
return newNetworkPolicy(
namespace,
allowIngressToVirtApiWebhook,
&networkv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{kubevirtLabelKey: VirtAPIName},
},
PolicyTypes: []networkv1.PolicyType{networkv1.PolicyTypeIngress},
Ingress: []networkv1.NetworkPolicyIngressRule{
{
Ports: []networkv1.NetworkPolicyPort{
{
Port: pointer.P(intstr.FromInt32(8443)),
Protocol: pointer.P(k8sv1.ProtocolTCP),
},
},
},
},
},
)
}
func newVirtApiToComponentsNP(namespace string) *networkv1.NetworkPolicy {
return newNetworkPolicy(
namespace,
allowVirtApiToComponents,
&networkv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{kubevirtLabelKey: VirtAPIName},
},
PolicyTypes: []networkv1.PolicyType{networkv1.PolicyTypeEgress},
Egress: []networkv1.NetworkPolicyEgressRule{
{
To: []networkv1.NetworkPolicyPeer{
{
PodSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: kubevirtLabelKey,
Operator: metav1.LabelSelectorOpIn,
Values: []string{
VirtOperatorName,
VirtHandlerName,
VirtControllerName,
VirtAPIName,
},
},
},
},
},
},
Ports: []networkv1.NetworkPolicyPort{
{
Port: pointer.P(intstr.FromInt32(8443)),
Protocol: pointer.P(k8sv1.ProtocolTCP),
},
},
},
},
},
)
}
func newVirtApiToLaunchersNP(namespace string) *networkv1.NetworkPolicy {
return newNetworkPolicy(
namespace,
allowVirtApiToLaunchers,
&networkv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{kubevirtLabelKey: VirtAPIName},
},
PolicyTypes: []networkv1.PolicyType{networkv1.PolicyTypeEgress},
Egress: []networkv1.NetworkPolicyEgressRule{
{
To: []networkv1.NetworkPolicyPeer{
{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{kubevirtLabelKey: "virt-launcher"},
},
NamespaceSelector: &metav1.LabelSelector{},
},
},
},
},
},
)
}
func newVirtApiToHandlersNP(namespace string) *networkv1.NetworkPolicy {
return newNetworkPolicy(
namespace,
allowVirtApiToHandlers,
&networkv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{kubevirtLabelKey: VirtAPIName},
},
PolicyTypes: []networkv1.PolicyType{networkv1.PolicyTypeEgress},
Egress: []networkv1.NetworkPolicyEgressRule{
{
To: []networkv1.NetworkPolicyPeer{
{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{kubevirtLabelKey: VirtHandlerName},
},
},
},
Ports: []networkv1.NetworkPolicyPort{
{
Protocol: pointer.P(k8sv1.ProtocolTCP),
},
},
},
},
},
)
}
func newHandlersToVirtApiNP(namespace string) *networkv1.NetworkPolicy {
return newNetworkPolicy(
namespace,
allowIngressToHandler,
&networkv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{kubevirtLabelKey: VirtHandlerName},
},
PolicyTypes: []networkv1.PolicyType{networkv1.PolicyTypeIngress},
Ingress: []networkv1.NetworkPolicyIngressRule{
{
From: []networkv1.NetworkPolicyPeer{
{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{kubevirtLabelKey: VirtAPIName},
},
},
},
Ports: []networkv1.NetworkPolicyPort{
{
Protocol: pointer.P(k8sv1.ProtocolTCP),
},
},
},
},
},
)
}
func newVirtOperatorWebhookNP(namespace string) *networkv1.NetworkPolicy {
return newNetworkPolicy(
namespace,
allowIngressToVirtOperatorWebhook,
&networkv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{kubevirtLabelKey: VirtOperatorName},
},
PolicyTypes: []networkv1.PolicyType{networkv1.PolicyTypeIngress},
Ingress: []networkv1.NetworkPolicyIngressRule{
{
Ports: []networkv1.NetworkPolicyPort{
{
Port: pointer.P(intstr.FromInt32(8444)),
Protocol: pointer.P(k8sv1.ProtocolTCP),
},
},
},
},
},
)
}
func newExportProxyNP(namespace string) *networkv1.NetworkPolicy {
return newNetworkPolicy(
namespace,
allowExportProxyCommunications,
&networkv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{kubevirtLabelKey: VirtExportProxyName},
},
PolicyTypes: []networkv1.PolicyType{networkv1.PolicyTypeIngress, networkv1.PolicyTypeEgress},
Ingress: []networkv1.NetworkPolicyIngressRule{
{
Ports: []networkv1.NetworkPolicyPort{
{
Port: pointer.P(intstr.FromInt32(8443)),
Protocol: pointer.P(k8sv1.ProtocolTCP),
},
},
},
},
Egress: []networkv1.NetworkPolicyEgressRule{
{
To: []networkv1.NetworkPolicyPeer{
{
PodSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "kubevirt.io.virt-export-service",
Operator: metav1.LabelSelectorOpExists,
},
},
},
NamespaceSelector: &metav1.LabelSelector{},
},
},
Ports: []networkv1.NetworkPolicyPort{
{
Port: pointer.P(intstr.FromInt32(8443)),
Protocol: pointer.P(k8sv1.ProtocolTCP),
},
},
},
},
},
)
}
func newHandlerToHandlerNP(namespace string) *networkv1.NetworkPolicy {
return newNetworkPolicy(
namespace,
allowHandlerToHandler,
&networkv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{kubevirtLabelKey: VirtHandlerName},
},
PolicyTypes: []networkv1.PolicyType{networkv1.PolicyTypeIngress, networkv1.PolicyTypeEgress},
Ingress: []networkv1.NetworkPolicyIngressRule{
{
From: []networkv1.NetworkPolicyPeer{
{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{kubevirtLabelKey: VirtHandlerName},
},
},
},
},
},
Egress: []networkv1.NetworkPolicyEgressRule{
{
To: []networkv1.NetworkPolicyPeer{
{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{kubevirtLabelKey: VirtHandlerName},
},
},
},
},
},
},
)
}
func newHandlerToPrometheusNP(namespace string) *networkv1.NetworkPolicy {
return newNetworkPolicy(
namespace,
allowHandlerToPrometheus,
&networkv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{kubevirtLabelKey: VirtHandlerName},
},
PolicyTypes: []networkv1.PolicyType{networkv1.PolicyTypeEgress},
Egress: []networkv1.NetworkPolicyEgressRule{
{
Ports: []networkv1.NetworkPolicyPort{
{
Port: pointer.P(intstr.FromInt32(8443)),
Protocol: pointer.P(k8sv1.ProtocolTCP),
},
},
},
},
},
)
}
package components
import (
"github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring"
promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
KUBEVIRT_PROMETHEUS_RULE_NAME = "prometheus-kubevirt-rules"
prometheusLabelKey = "prometheus.kubevirt.io"
prometheusLabelValue = "true"
)
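// NewServiceMonitorCR returns the ServiceMonitor that lets a prometheus-operator
// instance in monitorNamespace scrape the "metrics" endpoints of services
// labelled prometheus.kubevirt.io=true in the install namespace.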
func NewServiceMonitorCR(namespace string, monitorNamespace string, insecureSkipVerify bool) *promv1.ServiceMonitor {
return &promv1.ServiceMonitor{
TypeMeta: v12.TypeMeta{
APIVersion: monitoring.GroupName,
Kind: "ServiceMonitor",
},
ObjectMeta: v12.ObjectMeta{
Namespace: monitorNamespace,
Name: KUBEVIRT_PROMETHEUS_RULE_NAME,
Labels: map[string]string{
"openshift.io/cluster-monitoring": "",
prometheusLabelKey: prometheusLabelValue,
"k8s-app": "kubevirt",
},
},
Spec: promv1.ServiceMonitorSpec{
Selector: v12.LabelSelector{
MatchLabels: map[string]string{
prometheusLabelKey: prometheusLabelValue,
},
},
NamespaceSelector: promv1.NamespaceSelector{
MatchNames: []string{namespace},
},
Endpoints: []promv1.Endpoint{
{
Port: "metrics",
Scheme: "https",
TLSConfig: &promv1.TLSConfig{
SafeTLSConfig: promv1.SafeTLSConfig{
InsecureSkipVerify: &insecureSkipVerify,
},
},
HonorLabels: true,
},
},
},
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package components
import (
routev1 "github.com/openshift/api/route/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func GetAllRoutes(namespace string) []*routev1.Route {
return []*routev1.Route{
NewExportProxyRoute(namespace),
}
}
func newBlankRoute() *routev1.Route {
return &routev1.Route{
TypeMeta: metav1.TypeMeta{
APIVersion: "route.openshift.io/v1",
Kind: "Route",
},
}
}
func NewExportProxyRoute(namespace string) *routev1.Route {
route := newBlankRoute()
route.Namespace = namespace
route.Name = VirtExportProxyName
route.Spec.To.Kind = "Service"
route.Spec.To.Name = VirtExportProxyName
route.Spec.TLS = &routev1.TLSConfig{
Termination: routev1.TLSTerminationReencrypt,
InsecureEdgeTerminationPolicy: routev1.InsecureEdgeTerminationPolicyRedirect,
}
return route
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package components
import (
"fmt"
secv1 "github.com/openshift/api/security/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func GetAllSCC(namespace string) []*secv1.SecurityContextConstraints {
return []*secv1.SecurityContextConstraints{
NewKubeVirtHandlerSCC(namespace),
NewKubeVirtControllerSCC(namespace),
}
}
func newBlankSCC() *secv1.SecurityContextConstraints {
return &secv1.SecurityContextConstraints{
TypeMeta: metav1.TypeMeta{
APIVersion: "security.openshift.io/v1",
Kind: "SecurityContextConstraints",
},
}
}
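// NewKubeVirtHandlerSCC returns the highly privileged SecurityContextConstraints
// virt-handler needs: privileged containers, host PID/IPC/ports and hostPath
// volumes, granted only to the kubevirt-handler service account.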
func NewKubeVirtHandlerSCC(namespace string) *secv1.SecurityContextConstraints {
scc := newBlankSCC()
scc.Name = "kubevirt-handler"
scc.AllowPrivilegedContainer = true
scc.AllowHostPID = true
scc.AllowHostPorts = true
scc.AllowHostIPC = true
scc.RunAsUser = secv1.RunAsUserStrategyOptions{
Type: secv1.RunAsUserStrategyRunAsAny,
}
scc.SELinuxContext = secv1.SELinuxContextStrategyOptions{
Type: secv1.SELinuxStrategyRunAsAny,
}
scc.Volumes = []secv1.FSType{secv1.FSTypeAll}
scc.AllowHostDirVolumePlugin = true
scc.Users = []string{fmt.Sprintf("system:serviceaccount:%s:kubevirt-handler", namespace)}
return scc
}
func NewKubeVirtControllerSCC(namespace string) *secv1.SecurityContextConstraints {
scc := newBlankSCC()
scc.Name = "kubevirt-controller"
scc.AllowPrivilegedContainer = false
scc.RunAsUser = secv1.RunAsUserStrategyOptions{
Type: secv1.RunAsUserStrategyRunAsAny,
}
scc.SELinuxContext = secv1.SELinuxContextStrategyOptions{
Type: secv1.SELinuxStrategyRunAsAny,
}
scc.SeccompProfiles = []string{
"runtime/default",
"unconfined",
"localhost/kubevirt/kubevirt.json",
}
scc.AllowedCapabilities = []corev1.Capability{
// add CAP_SYS_NICE capability to allow setting cpu affinity
"SYS_NICE",
// add CAP_NET_BIND_SERVICE capability to allow dhcp and slirp operations
"NET_BIND_SERVICE",
}
scc.AllowHostDirVolumePlugin = true
scc.Users = []string{fmt.Sprintf("system:serviceaccount:%s:kubevirt-controller", namespace)}
return scc
}
package components
import (
"crypto/ecdsa"
"crypto/tls"
"crypto/x509"
"fmt"
"reflect"
"sort"
"time"
k8sv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"kubevirt.io/client-go/log"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/certificates/bootstrap"
"kubevirt.io/kubevirt/pkg/certificates/triple"
"kubevirt.io/kubevirt/pkg/certificates/triple/cert"
)
// #nosec 101, false positives were caused by variables not holding any secret value.
const (
KubeVirtCASecretName = "kubevirt-ca"
ExternalKubeVirtCAConfigMapName = "kubevirt-external-ca"
KubeVirtExportCASecretName = "kubevirt-export-ca"
VirtHandlerCertSecretName = "kubevirt-virt-handler-certs"
VirtHandlerServerCertSecretName = "kubevirt-virt-handler-server-certs"
VirtHandlerMigrationClientCertSecretName = "kubevirt-virt-handler-migration-client-certs"
VirtHandlerVsockClientCertSecretName = "kubevirt-virt-handler-vsock-client-certs"
VirtOperatorCertSecretName = "kubevirt-operator-certs"
VirtApiCertSecretName = "kubevirt-virt-api-certs"
VirtControllerCertSecretName = "kubevirt-controller-certs"
VirtExportProxyCertSecretName = "kubevirt-exportproxy-certs"
VirtSynchronizationControllerCertSecretName = "kubevirt-synchronization-controller-certs"
VirtSynchronizationControllerServerCertSecretName = "kubevirt-synchronization-controller-server-certs"
CABundleKey = "ca-bundle"
LocalPodDNStemplateString = "%s.%s.pod.cluster.local"
CaClusterLocal = "cluster.local"
maxCertificatesInBundle = 50
)
type CertificateCreationCallback func(secret *k8sv1.Secret, caCert *tls.Certificate, duration time.Duration) (cert *x509.Certificate, key *ecdsa.PrivateKey)
var populationStrategy = map[string]CertificateCreationCallback{
KubeVirtCASecretName: func(secret *k8sv1.Secret, _ *tls.Certificate, duration time.Duration) (cert *x509.Certificate, key *ecdsa.PrivateKey) {
caKeyPair, _ := triple.NewCA("kubevirt.io", duration)
return caKeyPair.Cert, caKeyPair.Key
},
KubeVirtExportCASecretName: func(secret *k8sv1.Secret, _ *tls.Certificate, duration time.Duration) (cert *x509.Certificate, key *ecdsa.PrivateKey) {
caKeyPair, _ := triple.NewCA("export.kubevirt.io", duration)
return caKeyPair.Cert, caKeyPair.Key
},
VirtOperatorCertSecretName: func(secret *k8sv1.Secret, caCert *tls.Certificate, duration time.Duration) (cert *x509.Certificate, key *ecdsa.PrivateKey) {
caKeyPair := &triple.KeyPair{
Key: caCert.PrivateKey.(*ecdsa.PrivateKey),
Cert: caCert.Leaf,
}
keyPair, _ := triple.NewServerKeyPair(
caKeyPair,
fmt.Sprintf(LocalPodDNStemplateString, VirtOperatorServiceName, secret.Namespace),
VirtOperatorServiceName,
secret.Namespace,
CaClusterLocal,
nil,
nil,
duration,
)
return keyPair.Cert, keyPair.Key
},
VirtApiCertSecretName: func(secret *k8sv1.Secret, caCert *tls.Certificate, duration time.Duration) (cert *x509.Certificate, key *ecdsa.PrivateKey) {
caKeyPair := &triple.KeyPair{
Key: caCert.PrivateKey.(*ecdsa.PrivateKey),
Cert: caCert.Leaf,
}
keyPair, _ := triple.NewServerKeyPair(
caKeyPair,
fmt.Sprintf(LocalPodDNStemplateString, VirtApiServiceName, secret.Namespace),
VirtApiServiceName,
secret.Namespace,
CaClusterLocal,
nil,
nil,
duration,
)
return keyPair.Cert, keyPair.Key
},
VirtControllerCertSecretName: func(secret *k8sv1.Secret, caCert *tls.Certificate, duration time.Duration) (cert *x509.Certificate, key *ecdsa.PrivateKey) {
caKeyPair := &triple.KeyPair{
Key: caCert.PrivateKey.(*ecdsa.PrivateKey),
Cert: caCert.Leaf,
}
keyPair, _ := triple.NewServerKeyPair(
caKeyPair,
fmt.Sprintf(LocalPodDNStemplateString, VirtControllerServiceName, secret.Namespace),
VirtControllerServiceName,
secret.Namespace,
CaClusterLocal,
nil,
nil,
duration,
)
return keyPair.Cert, keyPair.Key
},
VirtHandlerServerCertSecretName: func(secret *k8sv1.Secret, caCert *tls.Certificate, duration time.Duration) (cert *x509.Certificate, key *ecdsa.PrivateKey) {
caKeyPair := &triple.KeyPair{
Key: caCert.PrivateKey.(*ecdsa.PrivateKey),
Cert: caCert.Leaf,
}
keyPair, _ := triple.NewServerKeyPair(
caKeyPair,
"kubevirt.io:system:node:virt-handler",
VirtHandlerServiceName,
secret.Namespace,
CaClusterLocal,
nil,
nil,
duration,
)
return keyPair.Cert, keyPair.Key
},
VirtHandlerMigrationClientCertSecretName: func(secret *k8sv1.Secret, caCert *tls.Certificate, duration time.Duration) (cert *x509.Certificate, key *ecdsa.PrivateKey) {
caKeyPair := &triple.KeyPair{
Key: caCert.PrivateKey.(*ecdsa.PrivateKey),
Cert: caCert.Leaf,
}
keyPair, _ := triple.NewClientKeyPair(
caKeyPair,
"kubevirt.io:system:client:migration",
nil,
duration,
)
return keyPair.Cert, keyPair.Key
},
VirtHandlerVsockClientCertSecretName: func(secret *k8sv1.Secret, caCert *tls.Certificate, duration time.Duration) (cert *x509.Certificate, key *ecdsa.PrivateKey) {
caKeyPair := &triple.KeyPair{
Key: caCert.PrivateKey.(*ecdsa.PrivateKey),
Cert: caCert.Leaf,
}
keyPair, _ := triple.NewClientKeyPair(
caKeyPair,
"kubevirt.io:system:client:vsock",
nil,
duration,
)
return keyPair.Cert, keyPair.Key
},
VirtHandlerCertSecretName: func(secret *k8sv1.Secret, caCert *tls.Certificate, duration time.Duration) (cert *x509.Certificate, key *ecdsa.PrivateKey) {
caKeyPair := &triple.KeyPair{
Key: caCert.PrivateKey.(*ecdsa.PrivateKey),
Cert: caCert.Leaf,
}
clientKeyPair, _ := triple.NewClientKeyPair(caKeyPair,
"kubevirt.io:system:client:virt-handler",
nil,
duration,
)
return clientKeyPair.Cert, clientKeyPair.Key
},
VirtExportProxyCertSecretName: func(secret *k8sv1.Secret, caCert *tls.Certificate, duration time.Duration) (cert *x509.Certificate, key *ecdsa.PrivateKey) {
caKeyPair := &triple.KeyPair{
Key: caCert.PrivateKey.(*ecdsa.PrivateKey),
Cert: caCert.Leaf,
}
keyPair, _ := triple.NewServerKeyPair(
caKeyPair,
fmt.Sprintf(LocalPodDNStemplateString, VirtExportProxyServiceName, secret.Namespace),
VirtExportProxyServiceName,
secret.Namespace,
CaClusterLocal,
nil,
nil,
duration,
)
return keyPair.Cert, keyPair.Key
},
VirtSynchronizationControllerCertSecretName: func(secret *k8sv1.Secret, caCert *tls.Certificate, duration time.Duration) (cert *x509.Certificate, key *ecdsa.PrivateKey) {
caKeyPair := &triple.KeyPair{
Key: caCert.PrivateKey.(*ecdsa.PrivateKey),
Cert: caCert.Leaf,
}
clientKeyPair, _ := triple.NewClientKeyPair(caKeyPair,
"kubevirt.io:system:client:virt-synchronization-controller",
nil,
duration,
)
return clientKeyPair.Cert, clientKeyPair.Key
},
VirtSynchronizationControllerServerCertSecretName: func(secret *k8sv1.Secret, caCert *tls.Certificate, duration time.Duration) (cert *x509.Certificate, key *ecdsa.PrivateKey) {
caKeyPair := &triple.KeyPair{
Key: caCert.PrivateKey.(*ecdsa.PrivateKey),
Cert: caCert.Leaf,
}
keyPair, _ := triple.NewServerKeyPair(
caKeyPair,
"kubevirt.io:system:node:virt-synchronization-controller",
VirtSynchronizationControllerServiceName,
secret.Namespace,
CaClusterLocal,
nil,
nil,
duration,
)
return keyPair.Cert, keyPair.Key
},
}
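// PopulateSecretWithCertificate generates a certificate and key for the secret
// using its per-name population strategy, stores both PEM-encoded in secret.Data
// and records the requested duration in an annotation.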
func PopulateSecretWithCertificate(secret *k8sv1.Secret, caCert *tls.Certificate, duration *metav1.Duration) (err error) {
strategy, ok := populationStrategy[secret.Name]
if !ok {
return fmt.Errorf("no certificate population strategy found for secret")
}
crt, certKey := strategy(secret, caCert, duration.Duration)
secret.Data = map[string][]byte{
bootstrap.CertBytesValue: cert.EncodeCertPEM(crt),
bootstrap.KeyBytesValue: cert.EncodePrivateKeyPEM(certKey),
}
if secret.Annotations == nil {
secret.Annotations = map[string]string{}
}
secret.Annotations["kubevirt.io/duration"] = duration.String()
return nil
}
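// NewCACertSecrets returns the empty TLS secrets for the KubeVirt and export
// CAs; their contents are filled in later via PopulateSecretWithCertificate.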
func NewCACertSecrets(operatorNamespace string) []*k8sv1.Secret {
return []*k8sv1.Secret{
{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: KubeVirtCASecretName,
Namespace: operatorNamespace,
Labels: map[string]string{
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
},
},
Type: k8sv1.SecretTypeTLS,
},
{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: KubeVirtExportCASecretName,
Namespace: operatorNamespace,
Labels: map[string]string{
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
},
},
Type: k8sv1.SecretTypeTLS,
},
}
}
func NewCAConfigMaps(operatorNamespace string) []*k8sv1.ConfigMap {
return []*k8sv1.ConfigMap{
{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: KubeVirtCASecretName,
Namespace: operatorNamespace,
Labels: map[string]string{
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
},
},
},
{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: KubeVirtExportCASecretName,
Namespace: operatorNamespace,
Labels: map[string]string{
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
},
},
},
{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: ExternalKubeVirtCAConfigMapName,
Namespace: operatorNamespace,
Labels: map[string]string{
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
},
},
},
}
}
func NewCertSecrets(installNamespace string, operatorNamespace string) []*k8sv1.Secret {
secrets := []*k8sv1.Secret{
{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: VirtApiCertSecretName,
Namespace: installNamespace,
Labels: map[string]string{
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
},
},
Type: k8sv1.SecretTypeTLS,
},
{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: VirtControllerCertSecretName,
Namespace: installNamespace,
Labels: map[string]string{
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
},
},
Type: k8sv1.SecretTypeTLS,
},
{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: VirtExportProxyCertSecretName,
Namespace: installNamespace,
Labels: map[string]string{
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
},
},
Type: k8sv1.SecretTypeTLS,
},
{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: VirtHandlerServerCertSecretName,
Namespace: installNamespace,
Labels: map[string]string{
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
},
},
Type: k8sv1.SecretTypeTLS,
},
{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: VirtHandlerMigrationClientCertSecretName,
Namespace: installNamespace,
Labels: map[string]string{
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
},
},
Type: k8sv1.SecretTypeTLS,
},
{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: VirtHandlerVsockClientCertSecretName,
Namespace: installNamespace,
Labels: map[string]string{
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
},
},
Type: k8sv1.SecretTypeTLS,
},
{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: VirtHandlerCertSecretName,
Namespace: installNamespace,
Labels: map[string]string{
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
},
},
Type: k8sv1.SecretTypeTLS,
},
{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: VirtSynchronizationControllerCertSecretName,
Namespace: installNamespace,
Labels: map[string]string{
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
},
},
Type: k8sv1.SecretTypeTLS,
},
{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: VirtSynchronizationControllerServerCertSecretName,
Namespace: installNamespace,
Labels: map[string]string{
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
},
},
Type: k8sv1.SecretTypeTLS,
},
{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: VirtOperatorCertSecretName,
Namespace: operatorNamespace,
Labels: map[string]string{
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
},
},
Type: k8sv1.SecretTypeTLS,
},
}
return secrets
}
// NextRotationDeadline returns the threshold at which the current certificate
// should be rotated, i.e. its expiry minus renewBefore. The deadline moves up to
// now if the certificate is not signed by the supplied CA, and is recomputed from
// the CA's lifetime if the CA expires before the certificate does.
func NextRotationDeadline(cert *tls.Certificate, ca *tls.Certificate, renewBefore *metav1.Duration, caRenewBefore *metav1.Duration) time.Time {
if cert == nil {
return time.Now()
}
if ca != nil {
certPool := x509.NewCertPool()
certPool.AddCert(ca.Leaf)
_, err := cert.Leaf.Verify(x509.VerifyOptions{
Roots: certPool,
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
})
if err != nil {
log.DefaultLogger().Reason(err).Infof("The certificate with common name '%s' is not signed with the supplied CA. Triggering a rotation.", cert.Leaf.Subject.CommonName)
return time.Now()
}
}
certNotAfter := cert.Leaf.NotAfter
deadline := cert.Leaf.NotAfter.Add(-renewBefore.Duration)
if ca != nil {
caNotAfter := ca.Leaf.NotAfter
if caNotAfter.Before(certNotAfter) {
log.DefaultLogger().Infof("The certificate with common name '%s' expires after the supplied CA does. Scheduling rotation based on CA's lifetime.", cert.Leaf.Subject.CommonName)
deadline = caNotAfter
if caRenewBefore != nil {
// Set cert rotation for the middle of the period of time when CA's overlap
deadline = caNotAfter.Add(-time.Duration(float64(caRenewBefore.Duration) * 0.5))
}
}
}
log.DefaultLogger().V(4).Infof("Certificate with common name '%s' expiration is %v, rotation deadline is %v", cert.Leaf.Subject.CommonName, certNotAfter, deadline)
return deadline
}
func ValidateSecret(secret *k8sv1.Secret) error {
if _, ok := secret.Data[bootstrap.CertBytesValue]; !ok {
return fmt.Errorf("%s value not found in %s secret\n", bootstrap.CertBytesValue, secret.Name)
}
if _, ok := secret.Data[bootstrap.KeyBytesValue]; !ok {
return fmt.Errorf("%s value not found in %s secret\n", bootstrap.KeyBytesValue, secret.Name)
}
return nil
}
func LoadCertificates(secret *k8sv1.Secret) (serverCrt *tls.Certificate, err error) {
if err := ValidateSecret(secret); err != nil {
return nil, err
}
crt, err := tls.X509KeyPair(secret.Data[bootstrap.CertBytesValue], secret.Data[bootstrap.KeyBytesValue])
if err != nil {
return nil, fmt.Errorf("failed to load certificate: %v\n", err)
}
leaf, err := cert.ParseCertsPEM(secret.Data[bootstrap.CertBytesValue])
if err != nil {
return nil, fmt.Errorf("failed to load leaf certificate: %v\n", err)
}
crt.Leaf = leaf[0]
return &crt, nil
}
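// Illustrative sketch (not part of the original file): loading a rotated
// serving certificate from its operator-managed secret. The client-go client
// and the namespace variable are assumptions for the example only.
//
//	secret, err := client.CoreV1().Secrets(installNamespace).Get(ctx, VirtApiCertSecretName, metav1.GetOptions{})
//	if err != nil {
//		return err
//	}
//	crt, err := LoadCertificates(secret) // rejects secrets missing the cert/key entries
//	if err != nil {
//		return err
//	}
//	log.DefaultLogger().Infof("certificate %s valid until %v", crt.Leaf.Subject.CommonName, crt.Leaf.NotAfter)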
// filterValidCertificates drops expired certificates and sorts the remainder
// newest first (by NotBefore). If more than maxCount certificates remain, the
// list is truncated to maxCount.
func filterValidCertificates(certs []*x509.Certificate, now time.Time, maxCount int) []*x509.Certificate {
validCerts := make([]*x509.Certificate, 0, len(certs))
for _, crt := range certs {
if !crt.NotAfter.Before(now) {
validCerts = append(validCerts, crt)
}
}
sort.SliceStable(validCerts, func(i, j int) bool {
return validCerts[i].NotBefore.Unix() > validCerts[j].NotBefore.Unix()
})
if len(validCerts) > maxCount {
log.Log.Warningf("more than %d CA certificates found in the CA bundle, truncating to %d", maxCount, maxCount)
return validCerts[:maxCount]
}
return validCerts
}
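// For example (a sketch with made-up lifetimes): given one expired certificate
// and two valid ones issued three hours and one hour ago,
// filterValidCertificates(certs, time.Now(), 2) drops the expired entry and
// returns the one-hour-old certificate first, since a newer NotBefore sorts first.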
func MergeCABundle(currentCert *tls.Certificate, currentBundle []byte, overlapDuration time.Duration) ([]byte, int, error) {
current := cert.EncodeCertPEM(currentCert.Leaf)
certs, err := cert.ParseCertsPEM(currentBundle)
if err != nil {
return nil, 0, err
}
now := time.Now()
validCerts := filterValidCertificates(certs, now, maxCertificatesInBundle)
var newBundle []byte
certCount := 0
// For every certificate i > 0 we check whether its predecessor (i-1) has
// already existed for longer than the overlap duration; certificate i = 0 is
// checked against the current certificate.
for i, crt := range validCerts {
if i == 0 {
if currentCert.Leaf.NotBefore.Add(overlapDuration).Before(now) {
log.DefaultLogger().Infof("Kept old CA certificates for a duration of at least %v, dropping them now.", overlapDuration)
break
}
} else {
if validCerts[i-1].NotBefore.Add(overlapDuration).Before(now) {
log.DefaultLogger().Infof("Kept old CA certificates for a duration of at least %v, dropping them now.", overlapDuration)
break
}
}
certBytes := cert.EncodeCertPEM(crt)
// don't add the current CA multiple times
if reflect.DeepEqual(certBytes, current) {
continue
}
newBundle = append(newBundle, certBytes...)
certCount++
}
newBundle = append(current, newBundle...)
certCount++
return newBundle, certCount, nil
}
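// Illustrative sketch (not part of the original file): refreshing a CA bundle
// after the CA certificate has rotated. The config map key and the one-hour
// overlap are assumptions chosen for the example.
//
//	const caBundleKey = "ca-bundle" // assumed key name
//	bundle, count, err := MergeCABundle(newCACert, []byte(configMap.Data[caBundleKey]), time.Hour)
//	if err != nil {
//		return err
//	}
//	// The new CA leaf always comes first; old CAs are kept until the overlap
//	// window has passed, so clients holding cached bundles keep validating.
//	configMap.Data[caBundleKey] = string(bundle)
//	log.DefaultLogger().V(4).Infof("CA bundle now holds %d certificates", count)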
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package components
import (
"fmt"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/pointer"
)
const (
validatingAdmissionPolicyBindingName = "kubevirt-node-restriction-binding"
validatingAdmissionPolicyName = "kubevirt-node-restriction-policy"
nodeRestrictionAppLabelValue = "kubevirt-node-restriction"
NodeRestrictionErrModifyAnother = "this user cannot modify this node"
NodeRestrictionErrModifySpec = "this user cannot modify spec of node"
NodeRestrictionErrChangeMetadataFields = "this user can only change allowed metadata fields"
NodeRestrictionErrAddDeleteLabels = "this user cannot add/delete non kubevirt-owned labels"
NodeRestrictionErrUpdateLabels = "this user cannot update non kubevirt-owned labels"
NodeRestrictionErrAddDeleteAnnotations = "this user cannot add/delete non kubevirt-owned annotations"
NodeRestrictionErrUpdateAnnotations = "this user cannot update non kubevirt-owned annotations"
)
func NewHandlerV1ValidatingAdmissionPolicyBinding() *admissionregistrationv1.ValidatingAdmissionPolicyBinding {
return &admissionregistrationv1.ValidatingAdmissionPolicyBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ValidatingAdmissionPolicyBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: validatingAdmissionPolicyBindingName,
Labels: map[string]string{
v1.AppLabel: nodeRestrictionAppLabelValue,
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
},
},
Spec: admissionregistrationv1.ValidatingAdmissionPolicyBindingSpec{
PolicyName: validatingAdmissionPolicyName,
ValidationActions: []admissionregistrationv1.ValidationAction{
admissionregistrationv1.Deny,
},
MatchResources: &admissionregistrationv1.MatchResources{
ResourceRules: []admissionregistrationv1.NamedRuleWithOperations{
{
RuleWithOperations: admissionregistrationv1.RuleWithOperations{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.OperationAll,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{""},
APIVersions: []string{"*"},
Resources: []string{"nodes"},
},
},
},
},
},
},
}
}
func NewHandlerV1ValidatingAdmissionPolicy(virtHandlerServiceAccount string) *admissionregistrationv1.ValidatingAdmissionPolicy {
return &admissionregistrationv1.ValidatingAdmissionPolicy{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ValidatingAdmissionPolicy",
},
ObjectMeta: metav1.ObjectMeta{
Name: validatingAdmissionPolicyName,
},
Spec: admissionregistrationv1.ValidatingAdmissionPolicySpec{
FailurePolicy: pointer.P(admissionregistrationv1.Fail),
MatchConstraints: &admissionregistrationv1.MatchResources{
ResourceRules: []admissionregistrationv1.NamedRuleWithOperations{
{
RuleWithOperations: admissionregistrationv1.RuleWithOperations{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{""},
APIVersions: []string{"*"},
Resources: []string{"nodes"},
},
},
},
},
},
MatchConditions: []admissionregistrationv1.MatchCondition{
{
Name: "virt-handler-user-only",
Expression: fmt.Sprintf("request.userInfo.username == %q", virtHandlerServiceAccount),
},
},
Variables: []admissionregistrationv1.Variable{
{
Name: "oldNonKubevirtLabels",
Expression: `oldObject.metadata.labels.filter(k, !k.contains("kubevirt.io") && k != "cpumanager")`,
},
{
Name: "oldLabels",
Expression: "oldObject.metadata.labels",
},
{
Name: "newNonKubevirtLabels",
Expression: `object.metadata.labels.filter(k, !k.contains("kubevirt.io") && k != "cpumanager")`,
},
{
Name: "newLabels",
Expression: "object.metadata.labels",
},
{
Name: "oldNonKubevirtAnnotations",
Expression: `oldObject.metadata.annotations.filter(k, !k.contains("kubevirt.io"))`,
},
{
Name: "newNonKubevirtAnnotations",
Expression: `object.metadata.annotations.filter(k, !k.contains("kubevirt.io"))`,
},
{
Name: "oldAnnotations",
Expression: "oldObject.metadata.annotations",
},
{
Name: "newAnnotations",
Expression: "object.metadata.annotations",
},
{
Name: "requestMatchNode",
Expression: "object.metadata.name == request.userInfo.extra['authentication.kubernetes.io/node-name'][0]",
},
{
Name: "hasNode",
Expression: "('authentication.kubernetes.io/node-name' in request.userInfo.extra)",
},
},
Validations: []admissionregistrationv1.Validation{
{
Expression: "variables.hasNode && variables.requestMatchNode",
Message: NodeRestrictionErrModifyAnother,
Reason: pointer.P(metav1.StatusReasonForbidden),
},
{
Expression: "object.spec == oldObject.spec",
Message: NodeRestrictionErrModifySpec,
Reason: pointer.P(metav1.StatusReasonForbidden),
},
{
Expression: `oldObject.metadata.filter(k, k != "labels" && k != "annotations" && k != "managedFields" && k != "resourceVersion").all(k, k in object.metadata) && object.metadata.filter(k, k != "labels" && k != "annotations" && k != "managedFields" && k != "resourceVersion").all(k, k in oldObject.metadata && oldObject.metadata[k] == object.metadata[k])`,
Message: NodeRestrictionErrChangeMetadataFields,
Reason: pointer.P(metav1.StatusReasonForbidden),
},
{
Expression: `size(variables.newNonKubevirtLabels) == size(variables.oldNonKubevirtLabels)`,
Message: NodeRestrictionErrAddDeleteLabels,
Reason: pointer.P(metav1.StatusReasonForbidden),
},
{
Expression: `variables.newNonKubevirtLabels.all(k, k in variables.oldNonKubevirtLabels && variables.newLabels[k] == variables.oldLabels[k])`,
Message: NodeRestrictionErrUpdateLabels,
Reason: pointer.P(metav1.StatusReasonForbidden),
},
{
Expression: `size(variables.newNonKubevirtAnnotations) == size(variables.oldNonKubevirtAnnotations)`,
Message: NodeRestrictionErrAddDeleteAnnotations,
Reason: pointer.P(metav1.StatusReasonForbidden),
},
{
Expression: `variables.newNonKubevirtAnnotations.all(k, k in variables.oldNonKubevirtAnnotations && variables.newAnnotations[k] == variables.oldAnnotations[k])`,
Message: NodeRestrictionErrUpdateAnnotations,
Reason: pointer.P(metav1.StatusReasonForbidden),
},
},
},
}
}
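// Illustrative example (not part of the original file): with this policy bound,
// a virt-handler authenticated for node "worker-1" (via the
// authentication.kubernetes.io/node-name extra) may update kubevirt-owned
// metadata on worker-1, e.g. the kubevirt.io/schedulable label. An update
// targeting worker-2 fails the "variables.hasNode && variables.requestMatchNode"
// validation and is denied with NodeRestrictionErrModifyAnother, while adding a
// non kubevirt-owned label such as "team=infra" trips the label-count check and
// is denied with NodeRestrictionErrAddDeleteLabels.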
package components
import (
"fmt"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"kubevirt.io/kubevirt/pkg/pointer"
clonebase "kubevirt.io/api/clone"
clone "kubevirt.io/api/clone/v1beta1"
"kubevirt.io/api/instancetype"
"kubevirt.io/api/core"
"kubevirt.io/api/migrations"
migrationsv1 "kubevirt.io/api/migrations/v1alpha1"
backupv1 "kubevirt.io/api/backup/v1alpha1"
virtv1 "kubevirt.io/api/core/v1"
exportv1 "kubevirt.io/api/export/v1beta1"
instancetypev1beta1 "kubevirt.io/api/instancetype/v1beta1"
poolv1 "kubevirt.io/api/pool/v1beta1"
snapshotv1 "kubevirt.io/api/snapshot/v1beta1"
)
var sideEffectNone = admissionregistrationv1.SideEffectClassNone
var sideEffectNoneOnDryRun = admissionregistrationv1.SideEffectClassNoneOnDryRun
const certificatesSecretAnnotationKey = "certificates.kubevirt.io/secret"
var defaultTimeoutSeconds = int32(10)
func NewOperatorWebhookService(operatorNamespace string) *corev1.Service {
return &corev1.Service{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Service",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: operatorNamespace,
Name: KubevirtOperatorWebhookServiceName,
Labels: map[string]string{
virtv1.AppLabel: "",
"prometheus.kubevirt.io": prometheusLabelValue,
},
},
Spec: corev1.ServiceSpec{
Selector: map[string]string{
"kubevirt.io": "virt-operator",
},
Ports: []corev1.ServicePort{
{
Name: "webhooks",
Port: 443,
TargetPort: intstr.IntOrString{
Type: intstr.String,
StrVal: "webhooks",
},
Protocol: corev1.ProtocolTCP,
},
},
Type: corev1.ServiceTypeClusterIP,
},
}
}
func NewOpertorValidatingWebhookConfiguration(operatorNamespace string) *admissionregistrationv1.ValidatingWebhookConfiguration {
failurePolicy := admissionregistrationv1.Fail
path := "/kubevirt-validate-delete"
kubevirtUpdatePath := KubeVirtUpdateValidatePath
return &admissionregistrationv1.ValidatingWebhookConfiguration{
TypeMeta: metav1.TypeMeta{
APIVersion: admissionregistrationv1.SchemeGroupVersion.String(),
Kind: "ValidatingWebhookConfiguration",
},
ObjectMeta: metav1.ObjectMeta{
Name: KubeVirtOperatorValidatingWebhookName,
Labels: map[string]string{
virtv1.AppLabel: KubeVirtOperatorValidatingWebhookName,
},
Annotations: map[string]string{
certificatesSecretAnnotationKey: "kubevirt-operator-certs",
},
},
Webhooks: []admissionregistrationv1.ValidatingWebhook{
{
Name: "kubevirt-validator.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: operatorNamespace,
Name: VirtOperatorServiceName,
Path: &path,
},
},
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Delete,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{core.GroupName},
APIVersions: virtv1.ApiSupportedWebhookVersions,
Resources: []string{"kubevirts"},
},
}},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
},
{
Name: "kubevirt-update-validator.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{core.GroupName},
APIVersions: virtv1.ApiSupportedWebhookVersions,
Resources: []string{"kubevirts"},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: operatorNamespace,
Name: VirtOperatorServiceName,
Path: &kubevirtUpdatePath,
},
},
},
{
Name: "kubevirt-create-validator.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{core.GroupName},
APIVersions: virtv1.ApiSupportedWebhookVersions,
Resources: []string{"kubevirts"},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: operatorNamespace,
Name: VirtOperatorServiceName,
Path: pointer.P(KubeVirtCreateValidatePath),
},
},
},
},
}
}
func NewVirtAPIMutatingWebhookConfiguration(installNamespace string) *admissionregistrationv1.MutatingWebhookConfiguration {
vmPath := VMMutatePath
vmiPath := VMIMutatePath
migrationPath := MigrationMutatePath
failurePolicy := admissionregistrationv1.Fail
return &admissionregistrationv1.MutatingWebhookConfiguration{
TypeMeta: metav1.TypeMeta{
APIVersion: admissionregistrationv1.SchemeGroupVersion.String(),
Kind: "MutatingWebhookConfiguration",
},
ObjectMeta: metav1.ObjectMeta{
Name: VirtAPIMutatingWebhookName,
Labels: map[string]string{
virtv1.AppLabel: VirtAPIMutatingWebhookName,
virtv1.ManagedByLabel: virtv1.ManagedByLabelOperatorValue,
},
Annotations: map[string]string{
certificatesSecretAnnotationKey: VirtApiCertSecretName,
},
},
Webhooks: []admissionregistrationv1.MutatingWebhook{
{
Name: "virtualmachines-mutator.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
SideEffects: &sideEffectNone,
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{core.GroupName},
APIVersions: virtv1.ApiSupportedWebhookVersions,
Resources: []string{"virtualmachines"},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &vmPath,
},
},
},
{
Name: "virtualmachineinstances-mutator.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
SideEffects: &sideEffectNone,
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{core.GroupName},
APIVersions: virtv1.ApiSupportedWebhookVersions,
Resources: []string{"virtualmachineinstances"},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &vmiPath,
},
},
},
{
Name: "migrations-mutator.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
SideEffects: &sideEffectNone,
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{core.GroupName},
APIVersions: virtv1.ApiSupportedWebhookVersions,
Resources: []string{"virtualmachineinstancemigrations"},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &migrationPath,
},
},
},
{
Name: fmt.Sprintf("%s-mutator.kubevirt.io", clonebase.ResourceVMClonePlural),
AdmissionReviewVersions: []string{"v1"},
SideEffects: &sideEffectNone,
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{clonebase.GroupName},
APIVersions: clonebase.ApiSupportedWebhookVersions,
Resources: []string{clonebase.ResourceVMClonePlural},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: pointer.P(VMCloneCreateMutatePath),
},
},
},
},
}
}
func NewVirtAPIValidatingWebhookConfiguration(installNamespace string) *admissionregistrationv1.ValidatingWebhookConfiguration {
vmiPathCreate := VMICreateValidatePath
vmiPathUpdate := VMIUpdateValidatePath
vmPath := VMValidatePath
vmirsPath := VMIRSValidatePath
vmpoolPath := VMPoolValidatePath
vmipresetPath := VMIPresetValidatePath
migrationCreatePath := MigrationCreateValidatePath
migrationUpdatePath := MigrationUpdateValidatePath
vmSnapshotValidatePath := VMSnapshotValidatePath
vmRestoreValidatePath := VMRestoreValidatePath
vmBackupValidatePath := VMBackupValidatePath
vmBackupTrackerValidatePath := VMBackupTrackerValidatePath
vmExportValidatePath := VMExportValidatePath
vmInstancetypeValidatePath := VMInstancetypeValidatePath
vmClusterInstancetypeValidatePath := VMClusterInstancetypeValidatePath
vmPreferenceValidatePath := VMPreferenceValidatePath
vmClusterPreferenceValidatePath := VMClusterPreferenceValidatePath
podEvictionValidatePath := PodEvictionValidatePath
statusValidatePath := StatusValidatePath
migrationPolicyCreateValidatePath := MigrationPolicyCreateValidatePath
vmCloneCreateValidatePath := VMCloneCreateValidatePath
failurePolicy := admissionregistrationv1.Fail
ignorePolicy := admissionregistrationv1.Ignore
return &admissionregistrationv1.ValidatingWebhookConfiguration{
TypeMeta: metav1.TypeMeta{
APIVersion: admissionregistrationv1.SchemeGroupVersion.String(),
Kind: "ValidatingWebhookConfiguration",
},
ObjectMeta: metav1.ObjectMeta{
Name: VirtAPIValidatingWebhookName,
Labels: map[string]string{
virtv1.AppLabel: VirtAPIValidatingWebhookName,
virtv1.ManagedByLabel: virtv1.ManagedByLabelOperatorValue,
},
Annotations: map[string]string{
certificatesSecretAnnotationKey: VirtApiCertSecretName,
},
},
Webhooks: []admissionregistrationv1.ValidatingWebhook{
{
Name: "virt-launcher-eviction-interceptor.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
// We don't want to block evictions in the cluster in case this webhook is down.
// The eviction of virt-launcher pods is still protected by our PDB.
FailurePolicy: &ignorePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNoneOnDryRun,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.OperationAll,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"pods/eviction"},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &podEvictionValidatePath,
},
},
MatchConditions: []admissionregistrationv1.MatchCondition{
{
Name: "only-vms",
Expression: `object.metadata.name.startsWith("virt-launcher")`,
},
},
},
{
Name: "hotplug-pod-eviction-interceptor.kubevirt.io",
AdmissionReviewVersions: []string{"v1", "v1beta1"},
// We don't want to block evictions in the cluster in case this webhook is down.
FailurePolicy: &ignorePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNoneOnDryRun,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.OperationAll,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"pods/eviction"},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &podEvictionValidatePath,
},
},
MatchConditions: []admissionregistrationv1.MatchCondition{
{
Name: "only-hotplug-pods",
Expression: `object.metadata.name.startsWith("hp-volume-")`,
},
},
},
{
Name: "virtualmachineinstances-create-validator.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{core.GroupName},
APIVersions: virtv1.ApiSupportedWebhookVersions,
Resources: []string{"virtualmachineinstances"},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &vmiPathCreate,
},
},
},
{
Name: "virtualmachineinstances-update-validator.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{core.GroupName},
APIVersions: virtv1.ApiSupportedWebhookVersions,
Resources: []string{"virtualmachineinstances"},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &vmiPathUpdate,
},
},
},
{
Name: "virtualmachine-validator.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{core.GroupName},
APIVersions: virtv1.ApiSupportedWebhookVersions,
Resources: []string{"virtualmachines"},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &vmPath,
},
},
},
{
Name: "virtualmachinereplicaset-validator.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{core.GroupName},
APIVersions: virtv1.ApiSupportedWebhookVersions,
Resources: []string{"virtualmachineinstancereplicasets"},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &vmirsPath,
},
},
},
{
Name: "virtualmachinepool-validator.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{poolv1.SchemeGroupVersion.Group},
APIVersions: []string{poolv1.SchemeGroupVersion.Version},
Resources: []string{"virtualmachinepools"},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &vmpoolPath,
},
},
},
{
Name: "virtualmachinepreset-validator.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{core.GroupName},
APIVersions: virtv1.ApiSupportedWebhookVersions,
Resources: []string{"virtualmachineinstancepresets"},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &vmipresetPath,
},
},
},
{
Name: "migration-create-validator.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{core.GroupName},
APIVersions: virtv1.ApiSupportedWebhookVersions,
Resources: []string{"virtualmachineinstancemigrations"},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &migrationCreatePath,
},
},
},
{
Name: "migration-update-validator.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{core.GroupName},
APIVersions: virtv1.ApiSupportedWebhookVersions,
Resources: []string{"virtualmachineinstancemigrations"},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &migrationUpdatePath,
},
},
},
{
Name: "virtualmachinesnapshot-validator.snapshot.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{snapshotv1.SchemeGroupVersion.Group},
APIVersions: []string{snapshotv1.SchemeGroupVersion.Version},
Resources: []string{"virtualmachinesnapshots"},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &vmSnapshotValidatePath,
},
},
},
{
Name: "virtualmachinerestore-validator.snapshot.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
SideEffects: &sideEffectNone,
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{snapshotv1.SchemeGroupVersion.Group},
APIVersions: []string{snapshotv1.SchemeGroupVersion.Version},
Resources: []string{"virtualmachinerestores"},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &vmRestoreValidatePath,
},
},
},
{
Name: "virtualmachinebackup-validator.backup.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{backupv1.SchemeGroupVersion.Group},
APIVersions: []string{backupv1.SchemeGroupVersion.Version},
Resources: []string{"virtualmachinebackups"},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &vmBackupValidatePath,
},
},
},
{
Name: "virtualmachinebackuptracker-validator.backup.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{backupv1.SchemeGroupVersion.Group},
APIVersions: []string{backupv1.SchemeGroupVersion.Version},
Resources: []string{"virtualmachinebackuptrackers"},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &vmBackupTrackerValidatePath,
},
},
},
{
Name: "virtualmachineexport-validator.export.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{exportv1.SchemeGroupVersion.Group},
APIVersions: []string{exportv1.SchemeGroupVersion.Version},
Resources: []string{"virtualmachineexports"},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &vmExportValidatePath,
},
},
},
{
Name: "virtualmachineinstancetype-validator.instancetype.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{instancetypev1beta1.SchemeGroupVersion.Group},
APIVersions: []string{
instancetypev1beta1.SchemeGroupVersion.Version,
},
Resources: []string{instancetype.PluralResourceName},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &VmInstancetypeValidatePath,
},
},
},
{
Name: "virtualmachineclusterinstancetype-validator.instancetype.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{instancetypev1beta1.SchemeGroupVersion.Group},
APIVersions: []string{
instancetypev1beta1.SchemeGroupVersion.Version,
},
Resources: []string{instancetype.ClusterPluralResourceName},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &VmClusterInstancetypeValidatePath,
},
},
},
{
Name: "virtualmachinepreference-validator.instancetype.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{instancetypev1beta1.SchemeGroupVersion.Group},
APIVersions: []string{
instancetypev1beta1.SchemeGroupVersion.Version,
},
Resources: []string{instancetype.PluralPreferenceResourceName},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &vmPreferenceValidatePath,
},
},
},
{
Name: "virtualmachineclusterpreference-validator.instancetype.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{instancetypev1beta1.SchemeGroupVersion.Group},
APIVersions: []string{
instancetypev1beta1.SchemeGroupVersion.Version,
},
Resources: []string{instancetype.ClusterPluralPreferenceResourceName},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &vmClusterPreferenceValidatePath,
},
},
},
{
Name: "kubevirt-crd-status-validator.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{core.GroupName},
APIVersions: virtv1.ApiSupportedWebhookVersions,
Resources: []string{
"virtualmachines/status",
"virtualmachineinstancereplicasets/status",
"virtualmachineinstancemigrations/status",
},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &statusValidatePath,
},
},
},
{
Name: "migration-policy-validator.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{migrationsv1.SchemeGroupVersion.Group},
APIVersions: []string{migrationsv1.SchemeGroupVersion.Version},
Resources: []string{migrations.ResourceMigrationPolicies},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &migrationPolicyCreateValidatePath,
},
},
},
{
Name: "vm-clone-validator.kubevirt.io",
AdmissionReviewVersions: []string{"v1"},
FailurePolicy: &failurePolicy,
TimeoutSeconds: &defaultTimeoutSeconds,
SideEffects: &sideEffectNone,
Rules: []admissionregistrationv1.RuleWithOperations{{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{clone.SchemeGroupVersion.Group},
APIVersions: []string{clone.SchemeGroupVersion.Version},
Resources: []string{clonebase.ResourceVMClonePlural},
},
}},
ClientConfig: admissionregistrationv1.WebhookClientConfig{
Service: &admissionregistrationv1.ServiceReference{
Namespace: installNamespace,
Name: VirtApiServiceName,
Path: &vmCloneCreateValidatePath,
},
},
},
},
}
}
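// Illustrative note (not part of the original file): each webhook entry above
// routes its admission reviews to the virt-api Service at the matching *Path
// constant below; with no explicit port set, the ServiceReference defaults to
// 443, so VM create/update reviews, for example, are POSTed to
// https://virt-api.<installNamespace>.svc:443/virtualmachines-validate.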
const KubeVirtUpdateValidatePath = "/kubevirt-validate-update"
const KubeVirtCreateValidatePath = "/kubevirt-validate-create"
const VMICreateValidatePath = "/virtualmachineinstances-validate-create"
const VMIUpdateValidatePath = "/virtualmachineinstances-validate-update"
const VMValidatePath = "/virtualmachines-validate"
const VMIRSValidatePath = "/virtualmachinereplicaset-validate"
const VMPoolValidatePath = "/virtualmachinepool-validate"
const VMIPresetValidatePath = "/vmipreset-validate"
const MigrationCreateValidatePath = "/migration-validate-create"
const MigrationUpdateValidatePath = "/migration-validate-update"
const VMMutatePath = "/virtualmachines-mutate"
const VMIMutatePath = "/virtualmachineinstances-mutate"
const MigrationMutatePath = "/migration-mutate-create"
const VirtApiServiceName = "virt-api"
const VirtControllerServiceName = "virt-controller"
const VirtHandlerServiceName = "virt-handler"
const VirtExportProxyServiceName = "virt-exportproxy"
const VirtSynchronizationControllerServiceName = "virt-synchronization-controller"
const VirtAPIValidatingWebhookName = "virt-api-validator"
const VirtOperatorServiceName = "kubevirt-operator-webhook"
const VirtAPIMutatingWebhookName = "virt-api-mutator"
const KubevirtOperatorWebhookServiceName = "kubevirt-operator-webhook"
const KubeVirtOperatorValidatingWebhookName = "virt-operator-validator"
const VMSnapshotValidatePath = "/virtualmachinesnapshots-validate"
const VMRestoreValidatePath = "/virtualmachinerestores-validate"
const VMBackupValidatePath = "/virtualmachinebackups-validate"
const VMBackupTrackerValidatePath = "/virtualmachinebackuptrackers-validate"
const VMExportValidatePath = "/virtualmachineexports-validate"
const VMInstancetypeValidatePath = "/virtualmachineinstancetypes-validate"
const VMClusterInstancetypeValidatePath = "/virtualmachineclusterinstancetypes-validate"
const VMPreferenceValidatePath = "/virtualmachinepreferences-validate"
const VMClusterPreferenceValidatePath = "/virtualmachineclusterpreferences-validate"
const StatusValidatePath = "/status-validate"
const PodEvictionValidatePath = "/pod-eviction-validate"
const MigrationPolicyCreateValidatePath = "/migration-policy-validate-create"
const VMCloneCreateValidatePath = "/vm-clone-validate-create"
const VMCloneCreateMutatePath = "/vm-clone-mutate-create"
// Code generated by MockGen. DO NOT EDIT.
// Source: strategy.go
//
// Generated by this command:
//
// mockgen -source strategy.go -imports libvirt=libvirt.org/go/libvirt -package=install -destination=generated_mock_strategy.go
//
// Package install is a generated GoMock package.
package install
import (
context "context"
reflect "reflect"
v1 "github.com/openshift/api/route/v1"
v10 "github.com/openshift/api/security/v1"
v11 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
gomock "go.uber.org/mock/gomock"
v12 "k8s.io/api/admissionregistration/v1"
v13 "k8s.io/api/apps/v1"
v14 "k8s.io/api/core/v1"
v15 "k8s.io/api/rbac/v1"
v16 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
v17 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
v18 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
v1beta1 "kubevirt.io/api/instancetype/v1beta1"
)
// MockAPIServiceInterface is a mock of APIServiceInterface interface.
type MockAPIServiceInterface struct {
ctrl *gomock.Controller
recorder *MockAPIServiceInterfaceMockRecorder
isgomock struct{}
}
// MockAPIServiceInterfaceMockRecorder is the mock recorder for MockAPIServiceInterface.
type MockAPIServiceInterfaceMockRecorder struct {
mock *MockAPIServiceInterface
}
// NewMockAPIServiceInterface creates a new mock instance.
func NewMockAPIServiceInterface(ctrl *gomock.Controller) *MockAPIServiceInterface {
mock := &MockAPIServiceInterface{ctrl: ctrl}
mock.recorder = &MockAPIServiceInterfaceMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockAPIServiceInterface) EXPECT() *MockAPIServiceInterfaceMockRecorder {
return m.recorder
}
// Create mocks base method.
func (m *MockAPIServiceInterface) Create(ctx context.Context, apiService *v18.APIService, opts v17.CreateOptions) (*v18.APIService, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Create", ctx, apiService, opts)
ret0, _ := ret[0].(*v18.APIService)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Create indicates an expected call of Create.
func (mr *MockAPIServiceInterfaceMockRecorder) Create(ctx, apiService, opts any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockAPIServiceInterface)(nil).Create), ctx, apiService, opts)
}
// Delete mocks base method.
func (m *MockAPIServiceInterface) Delete(ctx context.Context, name string, options v17.DeleteOptions) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", ctx, name, options)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete.
func (mr *MockAPIServiceInterfaceMockRecorder) Delete(ctx, name, options any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockAPIServiceInterface)(nil).Delete), ctx, name, options)
}
// Get mocks base method.
func (m *MockAPIServiceInterface) Get(ctx context.Context, name string, options v17.GetOptions) (*v18.APIService, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Get", ctx, name, options)
ret0, _ := ret[0].(*v18.APIService)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Get indicates an expected call of Get.
func (mr *MockAPIServiceInterfaceMockRecorder) Get(ctx, name, options any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockAPIServiceInterface)(nil).Get), ctx, name, options)
}
// Patch mocks base method.
func (m *MockAPIServiceInterface) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v17.PatchOptions, subresources ...string) (*v18.APIService, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, name, pt, data, opts}
for _, a := range subresources {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Patch", varargs...)
ret0, _ := ret[0].(*v18.APIService)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Patch indicates an expected call of Patch.
func (mr *MockAPIServiceInterfaceMockRecorder) Patch(ctx, name, pt, data, opts any, subresources ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, name, pt, data, opts}, subresources...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Patch", reflect.TypeOf((*MockAPIServiceInterface)(nil).Patch), varargs...)
}
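// Illustrative sketch (not part of the generated code): typical use of the mock
// in a test; the expected APIService name is an assumption for the example.
//
//	ctrl := gomock.NewController(t)
//	mockAPIService := NewMockAPIServiceInterface(ctrl)
//	mockAPIService.EXPECT().
//		Get(gomock.Any(), "v1.subresources.kubevirt.io", gomock.Any()).
//		Return(&v18.APIService{}, nil)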
// MockStrategyInterface is a mock of StrategyInterface interface.
type MockStrategyInterface struct {
ctrl *gomock.Controller
recorder *MockStrategyInterfaceMockRecorder
isgomock struct{}
}
// MockStrategyInterfaceMockRecorder is the mock recorder for MockStrategyInterface.
type MockStrategyInterfaceMockRecorder struct {
mock *MockStrategyInterface
}
// NewMockStrategyInterface creates a new mock instance.
func NewMockStrategyInterface(ctrl *gomock.Controller) *MockStrategyInterface {
mock := &MockStrategyInterface{ctrl: ctrl}
mock.recorder = &MockStrategyInterfaceMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockStrategyInterface) EXPECT() *MockStrategyInterfaceMockRecorder {
return m.recorder
}
// APIServices mocks base method.
func (m *MockStrategyInterface) APIServices() []*v18.APIService {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "APIServices")
ret0, _ := ret[0].([]*v18.APIService)
return ret0
}
// APIServices indicates an expected call of APIServices.
func (mr *MockStrategyInterfaceMockRecorder) APIServices() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "APIServices", reflect.TypeOf((*MockStrategyInterface)(nil).APIServices))
}
// ApiDeployments mocks base method.
func (m *MockStrategyInterface) ApiDeployments() []*v13.Deployment {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ApiDeployments")
ret0, _ := ret[0].([]*v13.Deployment)
return ret0
}
// ApiDeployments indicates an expected call of ApiDeployments.
func (mr *MockStrategyInterfaceMockRecorder) ApiDeployments() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApiDeployments", reflect.TypeOf((*MockStrategyInterface)(nil).ApiDeployments))
}
// CRDs mocks base method.
func (m *MockStrategyInterface) CRDs() []*v16.CustomResourceDefinition {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CRDs")
ret0, _ := ret[0].([]*v16.CustomResourceDefinition)
return ret0
}
// CRDs indicates an expected call of CRDs.
func (mr *MockStrategyInterfaceMockRecorder) CRDs() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CRDs", reflect.TypeOf((*MockStrategyInterface)(nil).CRDs))
}
// CertificateSecrets mocks base method.
func (m *MockStrategyInterface) CertificateSecrets() []*v14.Secret {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CertificateSecrets")
ret0, _ := ret[0].([]*v14.Secret)
return ret0
}
// CertificateSecrets indicates an expected call of CertificateSecrets.
func (mr *MockStrategyInterfaceMockRecorder) CertificateSecrets() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CertificateSecrets", reflect.TypeOf((*MockStrategyInterface)(nil).CertificateSecrets))
}
// ClusterRoleBindings mocks base method.
func (m *MockStrategyInterface) ClusterRoleBindings() []*v15.ClusterRoleBinding {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClusterRoleBindings")
ret0, _ := ret[0].([]*v15.ClusterRoleBinding)
return ret0
}
// ClusterRoleBindings indicates an expected call of ClusterRoleBindings.
func (mr *MockStrategyInterfaceMockRecorder) ClusterRoleBindings() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterRoleBindings", reflect.TypeOf((*MockStrategyInterface)(nil).ClusterRoleBindings))
}
// ClusterRoles mocks base method.
func (m *MockStrategyInterface) ClusterRoles() []*v15.ClusterRole {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClusterRoles")
ret0, _ := ret[0].([]*v15.ClusterRole)
return ret0
}
// ClusterRoles indicates an expected call of ClusterRoles.
func (mr *MockStrategyInterfaceMockRecorder) ClusterRoles() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterRoles", reflect.TypeOf((*MockStrategyInterface)(nil).ClusterRoles))
}
// ConfigMaps mocks base method.
func (m *MockStrategyInterface) ConfigMaps() []*v14.ConfigMap {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ConfigMaps")
ret0, _ := ret[0].([]*v14.ConfigMap)
return ret0
}
// ConfigMaps indicates an expected call of ConfigMaps.
func (mr *MockStrategyInterfaceMockRecorder) ConfigMaps() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConfigMaps", reflect.TypeOf((*MockStrategyInterface)(nil).ConfigMaps))
}
// ControllerDeployments mocks base method.
func (m *MockStrategyInterface) ControllerDeployments() []*v13.Deployment {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ControllerDeployments")
ret0, _ := ret[0].([]*v13.Deployment)
return ret0
}
// ControllerDeployments indicates an expected call of ControllerDeployments.
func (mr *MockStrategyInterfaceMockRecorder) ControllerDeployments() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerDeployments", reflect.TypeOf((*MockStrategyInterface)(nil).ControllerDeployments))
}
// DaemonSets mocks base method.
func (m *MockStrategyInterface) DaemonSets() []*v13.DaemonSet {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DaemonSets")
ret0, _ := ret[0].([]*v13.DaemonSet)
return ret0
}
// DaemonSets indicates an expected call of DaemonSets.
func (mr *MockStrategyInterfaceMockRecorder) DaemonSets() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DaemonSets", reflect.TypeOf((*MockStrategyInterface)(nil).DaemonSets))
}
// Deployments mocks base method.
func (m *MockStrategyInterface) Deployments() []*v13.Deployment {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Deployments")
ret0, _ := ret[0].([]*v13.Deployment)
return ret0
}
// Deployments indicates an expected call of Deployments.
func (mr *MockStrategyInterfaceMockRecorder) Deployments() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Deployments", reflect.TypeOf((*MockStrategyInterface)(nil).Deployments))
}
// ExportProxyDeployments mocks base method.
func (m *MockStrategyInterface) ExportProxyDeployments() []*v13.Deployment {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ExportProxyDeployments")
ret0, _ := ret[0].([]*v13.Deployment)
return ret0
}
// ExportProxyDeployments indicates an expected call of ExportProxyDeployments.
func (mr *MockStrategyInterfaceMockRecorder) ExportProxyDeployments() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportProxyDeployments", reflect.TypeOf((*MockStrategyInterface)(nil).ExportProxyDeployments))
}
// Instancetypes mocks base method.
func (m *MockStrategyInterface) Instancetypes() []*v1beta1.VirtualMachineClusterInstancetype {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Instancetypes")
ret0, _ := ret[0].([]*v1beta1.VirtualMachineClusterInstancetype)
return ret0
}
// Instancetypes indicates an expected call of Instancetypes.
func (mr *MockStrategyInterfaceMockRecorder) Instancetypes() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Instancetypes", reflect.TypeOf((*MockStrategyInterface)(nil).Instancetypes))
}
// MutatingWebhookConfigurations mocks base method.
func (m *MockStrategyInterface) MutatingWebhookConfigurations() []*v12.MutatingWebhookConfiguration {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "MutatingWebhookConfigurations")
ret0, _ := ret[0].([]*v12.MutatingWebhookConfiguration)
return ret0
}
// MutatingWebhookConfigurations indicates an expected call of MutatingWebhookConfigurations.
func (mr *MockStrategyInterfaceMockRecorder) MutatingWebhookConfigurations() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MutatingWebhookConfigurations", reflect.TypeOf((*MockStrategyInterface)(nil).MutatingWebhookConfigurations))
}
// Preferences mocks base method.
func (m *MockStrategyInterface) Preferences() []*v1beta1.VirtualMachineClusterPreference {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Preferences")
ret0, _ := ret[0].([]*v1beta1.VirtualMachineClusterPreference)
return ret0
}
// Preferences indicates an expected call of Preferences.
func (mr *MockStrategyInterfaceMockRecorder) Preferences() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Preferences", reflect.TypeOf((*MockStrategyInterface)(nil).Preferences))
}
// PrometheusRules mocks base method.
func (m *MockStrategyInterface) PrometheusRules() []*v11.PrometheusRule {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PrometheusRules")
ret0, _ := ret[0].([]*v11.PrometheusRule)
return ret0
}
// PrometheusRules indicates an expected call of PrometheusRules.
func (mr *MockStrategyInterfaceMockRecorder) PrometheusRules() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrometheusRules", reflect.TypeOf((*MockStrategyInterface)(nil).PrometheusRules))
}
// RoleBindings mocks base method.
func (m *MockStrategyInterface) RoleBindings() []*v15.RoleBinding {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RoleBindings")
ret0, _ := ret[0].([]*v15.RoleBinding)
return ret0
}
// RoleBindings indicates an expected call of RoleBindings.
func (mr *MockStrategyInterfaceMockRecorder) RoleBindings() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RoleBindings", reflect.TypeOf((*MockStrategyInterface)(nil).RoleBindings))
}
// Roles mocks base method.
func (m *MockStrategyInterface) Roles() []*v15.Role {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Roles")
ret0, _ := ret[0].([]*v15.Role)
return ret0
}
// Roles indicates an expected call of Roles.
func (mr *MockStrategyInterfaceMockRecorder) Roles() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Roles", reflect.TypeOf((*MockStrategyInterface)(nil).Roles))
}
// Routes mocks base method.
func (m *MockStrategyInterface) Routes() []*v1.Route {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Routes")
ret0, _ := ret[0].([]*v1.Route)
return ret0
}
// Routes indicates an expected call of Routes.
func (mr *MockStrategyInterfaceMockRecorder) Routes() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Routes", reflect.TypeOf((*MockStrategyInterface)(nil).Routes))
}
// SCCs mocks base method.
func (m *MockStrategyInterface) SCCs() []*v10.SecurityContextConstraints {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SCCs")
ret0, _ := ret[0].([]*v10.SecurityContextConstraints)
return ret0
}
// SCCs indicates an expected call of SCCs.
func (mr *MockStrategyInterfaceMockRecorder) SCCs() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SCCs", reflect.TypeOf((*MockStrategyInterface)(nil).SCCs))
}
// ServiceAccounts mocks base method.
func (m *MockStrategyInterface) ServiceAccounts() []*v14.ServiceAccount {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ServiceAccounts")
ret0, _ := ret[0].([]*v14.ServiceAccount)
return ret0
}
// ServiceAccounts indicates an expected call of ServiceAccounts.
func (mr *MockStrategyInterfaceMockRecorder) ServiceAccounts() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServiceAccounts", reflect.TypeOf((*MockStrategyInterface)(nil).ServiceAccounts))
}
// ServiceMonitors mocks base method.
func (m *MockStrategyInterface) ServiceMonitors() []*v11.ServiceMonitor {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ServiceMonitors")
ret0, _ := ret[0].([]*v11.ServiceMonitor)
return ret0
}
// ServiceMonitors indicates an expected call of ServiceMonitors.
func (mr *MockStrategyInterfaceMockRecorder) ServiceMonitors() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServiceMonitors", reflect.TypeOf((*MockStrategyInterface)(nil).ServiceMonitors))
}
// Services mocks base method.
func (m *MockStrategyInterface) Services() []*v14.Service {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Services")
ret0, _ := ret[0].([]*v14.Service)
return ret0
}
// Services indicates an expected call of Services.
func (mr *MockStrategyInterfaceMockRecorder) Services() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Services", reflect.TypeOf((*MockStrategyInterface)(nil).Services))
}
// SynchronizationControllerDeployments mocks base method.
func (m *MockStrategyInterface) SynchronizationControllerDeployments() []*v13.Deployment {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SynchronizationControllerDeployments")
ret0, _ := ret[0].([]*v13.Deployment)
return ret0
}
// SynchronizationControllerDeployments indicates an expected call of SynchronizationControllerDeployments.
func (mr *MockStrategyInterfaceMockRecorder) SynchronizationControllerDeployments() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SynchronizationControllerDeployments", reflect.TypeOf((*MockStrategyInterface)(nil).SynchronizationControllerDeployments))
}
// ValidatingAdmissionPolicies mocks base method.
func (m *MockStrategyInterface) ValidatingAdmissionPolicies() []*v12.ValidatingAdmissionPolicy {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidatingAdmissionPolicies")
ret0, _ := ret[0].([]*v12.ValidatingAdmissionPolicy)
return ret0
}
// ValidatingAdmissionPolicies indicates an expected call of ValidatingAdmissionPolicies.
func (mr *MockStrategyInterfaceMockRecorder) ValidatingAdmissionPolicies() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidatingAdmissionPolicies", reflect.TypeOf((*MockStrategyInterface)(nil).ValidatingAdmissionPolicies))
}
// ValidatingAdmissionPolicyBindings mocks base method.
func (m *MockStrategyInterface) ValidatingAdmissionPolicyBindings() []*v12.ValidatingAdmissionPolicyBinding {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidatingAdmissionPolicyBindings")
ret0, _ := ret[0].([]*v12.ValidatingAdmissionPolicyBinding)
return ret0
}
// ValidatingAdmissionPolicyBindings indicates an expected call of ValidatingAdmissionPolicyBindings.
func (mr *MockStrategyInterfaceMockRecorder) ValidatingAdmissionPolicyBindings() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidatingAdmissionPolicyBindings", reflect.TypeOf((*MockStrategyInterface)(nil).ValidatingAdmissionPolicyBindings))
}
// ValidatingWebhookConfigurations mocks base method.
func (m *MockStrategyInterface) ValidatingWebhookConfigurations() []*v12.ValidatingWebhookConfiguration {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidatingWebhookConfigurations")
ret0, _ := ret[0].([]*v12.ValidatingWebhookConfiguration)
return ret0
}
// ValidatingWebhookConfigurations indicates an expected call of ValidatingWebhookConfigurations.
func (mr *MockStrategyInterfaceMockRecorder) ValidatingWebhookConfigurations() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidatingWebhookConfigurations", reflect.TypeOf((*MockStrategyInterface)(nil).ValidatingWebhookConfigurations))
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package install
import (
"bufio"
"bytes"
"compress/gzip"
"context"
"encoding/base64"
"fmt"
"io"
"strings"
routev1 "github.com/openshift/api/route/v1"
secv1 "github.com/openshift/api/security/v1"
promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
ext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
extv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
k8coresv1 "k8s.io/client-go/kubernetes/typed/core/v1"
apiregv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
"sigs.k8s.io/yaml"
v1 "kubevirt.io/api/core/v1"
instancetypev1beta1 "kubevirt.io/api/instancetype/v1beta1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/monitoring/rules"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/components"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/rbac"
operatorutil "kubevirt.io/kubevirt/pkg/virt-operator/util"
marshalutil "kubevirt.io/kubevirt/tools/util"
)
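// ManifestsEncodingGzipBase64 is the value of the InstallStrategyConfigMapEncoding
// annotation indicating that the ConfigMap's manifests payload is gzip-compressed
// and base64-encoded.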
const ManifestsEncodingGzipBase64 = "gzip+base64"
//go:generate mockgen -source $GOFILE -imports "libvirt=libvirt.org/go/libvirt" -package=$GOPACKAGE -destination=generated_mock_$GOFILE
type APIServiceInterface interface {
Get(ctx context.Context, name string, options metav1.GetOptions) (*apiregv1.APIService, error)
Create(ctx context.Context, apiService *apiregv1.APIService, opts metav1.CreateOptions) (*apiregv1.APIService, error)
Delete(ctx context.Context, name string, options metav1.DeleteOptions) error
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *apiregv1.APIService, err error)
}
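// StrategyInterface exposes read access to every group of Kubernetes resources
// that makes up a KubeVirt install strategy.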
type StrategyInterface interface {
ServiceAccounts() []*corev1.ServiceAccount
ClusterRoles() []*rbacv1.ClusterRole
ClusterRoleBindings() []*rbacv1.ClusterRoleBinding
Roles() []*rbacv1.Role
RoleBindings() []*rbacv1.RoleBinding
Services() []*corev1.Service
Deployments() []*appsv1.Deployment
ApiDeployments() []*appsv1.Deployment
ControllerDeployments() []*appsv1.Deployment
ExportProxyDeployments() []*appsv1.Deployment
SynchronizationControllerDeployments() []*appsv1.Deployment
DaemonSets() []*appsv1.DaemonSet
ValidatingWebhookConfigurations() []*admissionregistrationv1.ValidatingWebhookConfiguration
MutatingWebhookConfigurations() []*admissionregistrationv1.MutatingWebhookConfiguration
APIServices() []*apiregv1.APIService
CertificateSecrets() []*corev1.Secret
SCCs() []*secv1.SecurityContextConstraints
ServiceMonitors() []*promv1.ServiceMonitor
PrometheusRules() []*promv1.PrometheusRule
ConfigMaps() []*corev1.ConfigMap
CRDs() []*extv1.CustomResourceDefinition
Routes() []*routev1.Route
Instancetypes() []*instancetypev1beta1.VirtualMachineClusterInstancetype
Preferences() []*instancetypev1beta1.VirtualMachineClusterPreference
ValidatingAdmissionPolicyBindings() []*admissionregistrationv1.ValidatingAdmissionPolicyBinding
ValidatingAdmissionPolicies() []*admissionregistrationv1.ValidatingAdmissionPolicy
}
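// Strategy is the concrete StrategyInterface implementation; it aggregates all
// resources generated for (or loaded from) a KubeVirt install strategy.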
type Strategy struct {
serviceAccounts []*corev1.ServiceAccount
clusterRoles []*rbacv1.ClusterRole
clusterRoleBindings []*rbacv1.ClusterRoleBinding
roles []*rbacv1.Role
roleBindings []*rbacv1.RoleBinding
crds []*extv1.CustomResourceDefinition
services []*corev1.Service
deployments []*appsv1.Deployment
daemonSets []*appsv1.DaemonSet
validatingWebhookConfigurations []*admissionregistrationv1.ValidatingWebhookConfiguration
mutatingWebhookConfigurations []*admissionregistrationv1.MutatingWebhookConfiguration
apiServices []*apiregv1.APIService
certificateSecrets []*corev1.Secret
sccs []*secv1.SecurityContextConstraints
serviceMonitors []*promv1.ServiceMonitor
prometheusRules []*promv1.PrometheusRule
configMaps []*corev1.ConfigMap
routes []*routev1.Route
instancetypes []*instancetypev1beta1.VirtualMachineClusterInstancetype
preferences []*instancetypev1beta1.VirtualMachineClusterPreference
validatingAdmissionPolicyBindings []*admissionregistrationv1.ValidatingAdmissionPolicyBinding
validatingAdmissionPolicies []*admissionregistrationv1.ValidatingAdmissionPolicy
}
func (ins *Strategy) ServiceAccounts() []*corev1.ServiceAccount {
return ins.serviceAccounts
}
func (ins *Strategy) ClusterRoles() []*rbacv1.ClusterRole {
return ins.clusterRoles
}
func (ins *Strategy) ClusterRoleBindings() []*rbacv1.ClusterRoleBinding {
return ins.clusterRoleBindings
}
func (ins *Strategy) Roles() []*rbacv1.Role {
return ins.roles
}
func (ins *Strategy) RoleBindings() []*rbacv1.RoleBinding {
return ins.roleBindings
}
func (ins *Strategy) Services() []*corev1.Service {
return ins.services
}
func (ins *Strategy) Deployments() []*appsv1.Deployment {
return ins.deployments
}
func (ins *Strategy) ApiDeployments() []*appsv1.Deployment {
var deployments []*appsv1.Deployment
for _, deployment := range ins.deployments {
if !strings.Contains(deployment.Name, "virt-api") {
continue
}
deployments = append(deployments, deployment)
}
return deployments
}
func (ins *Strategy) ControllerDeployments() []*appsv1.Deployment {
var deployments []*appsv1.Deployment
for _, deployment := range ins.deployments {
if !strings.Contains(deployment.Name, "virt-controller") {
continue
}
deployments = append(deployments, deployment)
}
return deployments
}
func (ins *Strategy) ExportProxyDeployments() []*appsv1.Deployment {
var deployments []*appsv1.Deployment
for _, deployment := range ins.deployments {
if !strings.Contains(deployment.Name, "virt-exportproxy") {
continue
}
deployments = append(deployments, deployment)
}
return deployments
}
func (ins *Strategy) SynchronizationControllerDeployments() []*appsv1.Deployment {
var deployments []*appsv1.Deployment
for _, deployment := range ins.deployments {
if !strings.Contains(deployment.Name, "virt-synchronization-controller") {
continue
}
deployments = append(deployments, deployment)
}
return deployments
}
func (ins *Strategy) DaemonSets() []*appsv1.DaemonSet {
return ins.daemonSets
}
func (ins *Strategy) ValidatingWebhookConfigurations() []*admissionregistrationv1.ValidatingWebhookConfiguration {
return ins.validatingWebhookConfigurations
}
func (ins *Strategy) MutatingWebhookConfigurations() []*admissionregistrationv1.MutatingWebhookConfiguration {
return ins.mutatingWebhookConfigurations
}
func (ins *Strategy) APIServices() []*apiregv1.APIService {
return ins.apiServices
}
func (ins *Strategy) CertificateSecrets() []*corev1.Secret {
return ins.certificateSecrets
}
func (ins *Strategy) SCCs() []*secv1.SecurityContextConstraints {
return ins.sccs
}
func (ins *Strategy) ServiceMonitors() []*promv1.ServiceMonitor {
return ins.serviceMonitors
}
func (ins *Strategy) PrometheusRules() []*promv1.PrometheusRule {
return ins.prometheusRules
}
func (ins *Strategy) ConfigMaps() []*corev1.ConfigMap {
return ins.configMaps
}
func (ins *Strategy) CRDs() []*extv1.CustomResourceDefinition {
return ins.crds
}
func (ins *Strategy) Routes() []*routev1.Route {
return ins.routes
}
func (ins *Strategy) Instancetypes() []*instancetypev1beta1.VirtualMachineClusterInstancetype {
return ins.instancetypes
}
func (ins *Strategy) Preferences() []*instancetypev1beta1.VirtualMachineClusterPreference {
return ins.preferences
}
func (ins *Strategy) ValidatingAdmissionPolicyBindings() []*admissionregistrationv1.ValidatingAdmissionPolicyBinding {
return ins.validatingAdmissionPolicyBindings
}
func (ins *Strategy) ValidatingAdmissionPolicies() []*admissionregistrationv1.ValidatingAdmissionPolicy {
return ins.validatingAdmissionPolicies
}
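// encodeManifests gzip-compresses the given manifests and returns the result
// base64-encoded, matching the ManifestsEncodingGzipBase64 annotation.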
func encodeManifests(manifests []byte) (string, error) {
var buf bytes.Buffer
zw := gzip.NewWriter(&buf)
_, err := zw.Write(manifests)
if err != nil {
return "", err
}
if err = zw.Close(); err != nil {
return "", err
}
base64Strategy := base64.StdEncoding.EncodeToString(buf.Bytes())
return base64Strategy, nil
}
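// decodeManifests reverses encodeManifests: it base64-decodes the payload and
// then decompresses the resulting gzip stream. A minimal round-trip sketch
// (hypothetical input, for illustration only):
//
//	encoded, _ := encodeManifests([]byte("---\napiVersion: v1\n"))
//	decoded, _ := decodeManifests([]byte(encoded))
//	// decoded now equals the original manifest string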
func decodeManifests(strategy []byte) (string, error) {
var decodedStrategy strings.Builder
gzippedStrategy, err := base64.StdEncoding.DecodeString(string(strategy))
if err != nil {
return "", err
}
buf := bytes.NewBuffer(gzippedStrategy)
zr, err := gzip.NewReader(buf)
if err != nil {
return "", err
}
if _, err := io.Copy(&decodedStrategy, zr); err != nil {
return "", err
}
return decodedStrategy.String(), nil
}
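// NewInstallStrategyConfigMap generates the current install strategy and wraps
// its encoded manifests in a ConfigMap annotated with the deployment's version,
// registry and identifier so it can be matched later from the cache.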
func NewInstallStrategyConfigMap(config *operatorutil.KubeVirtDeploymentConfig, monitorNamespace string, operatorNamespace string) (*corev1.ConfigMap, error) {
strategy, err := GenerateCurrentInstallStrategy(config, monitorNamespace, operatorNamespace)
if err != nil {
return nil, err
}
manifests, err := encodeManifests(dumpInstallStrategyToBytes(strategy))
if err != nil {
return nil, err
}
configMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "kubevirt-install-strategy-",
Namespace: config.GetNamespace(),
Labels: map[string]string{
v1.ManagedByLabel: v1.ManagedByLabelOperatorValue,
v1.InstallStrategyLabel: "",
},
Annotations: map[string]string{
v1.InstallStrategyVersionAnnotation: config.GetKubeVirtVersion(),
v1.InstallStrategyRegistryAnnotation: config.GetImageRegistry(),
v1.InstallStrategyIdentifierAnnotation: config.GetDeploymentID(),
v1.InstallStrategyConfigMapEncoding: ManifestsEncodingGzipBase64,
},
},
Data: map[string]string{
"manifests": manifests,
},
}
return configMap, nil
}
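// getMonitorNamespace returns the first potential monitoring namespace that
// exists and contains the monitoring ServiceAccount, or "" when none qualifies.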
func getMonitorNamespace(clientset k8coresv1.CoreV1Interface, config *operatorutil.KubeVirtDeploymentConfig) (namespace string, err error) {
for _, ns := range config.GetPotentialMonitorNamespaces() {
if nsExists, err := isNamespaceExist(clientset, ns); nsExists {
// the monitoring service account must be in the monitoring namespace otherwise
// we won't be able to create roleBinding for prometheus operator pods
if saExists, err := isServiceAccountExist(clientset, ns, config.GetMonitorServiceAccountName()); saExists {
return ns, nil
} else if err != nil {
return "", err
}
} else if err != nil {
return "", err
}
}
return "", nil
}
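// DumpInstallStrategyToConfigMap builds the install strategy from the operator's
// environment and persists it as a new ConfigMap in the deployment namespace.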
func DumpInstallStrategyToConfigMap(clientset kubecli.KubevirtClient, operatorNamespace string) error {
config, err := operatorutil.GetConfigFromEnv()
if err != nil {
return err
}
monitorNamespace, err := getMonitorNamespace(clientset.CoreV1(), config)
if err != nil {
return err
}
configMap, err := NewInstallStrategyConfigMap(config, monitorNamespace, operatorNamespace)
if err != nil {
return err
}
_, err = clientset.CoreV1().ConfigMaps(config.GetNamespace()).Create(context.Background(), configMap, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("failed to create new install strategy configmap: %v", err)
}
return nil
}
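// dumpInstallStrategyToBytes serializes every resource of the strategy into a
// single multi-document YAML stream separated by "---" markers.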
func dumpInstallStrategyToBytes(strategy *Strategy) []byte {
var b bytes.Buffer
writer := bufio.NewWriter(&b)
for _, entry := range strategy.serviceAccounts {
marshalutil.MarshallObject(entry, writer)
}
for _, entry := range strategy.clusterRoles {
marshalutil.MarshallObject(entry, writer)
}
for _, entry := range strategy.clusterRoleBindings {
marshalutil.MarshallObject(entry, writer)
}
for _, entry := range strategy.roles {
marshalutil.MarshallObject(entry, writer)
}
for _, entry := range strategy.roleBindings {
marshalutil.MarshallObject(entry, writer)
}
for _, entry := range strategy.crds {
b, _ := yaml.Marshal(entry)
writer.Write([]byte("---\n"))
writer.Write(b)
}
for _, entry := range strategy.services {
marshalutil.MarshallObject(entry, writer)
}
for _, entry := range strategy.certificateSecrets {
marshalutil.MarshallObject(entry, writer)
}
for _, entry := range strategy.validatingWebhookConfigurations {
marshalutil.MarshallObject(entry, writer)
}
for _, entry := range strategy.mutatingWebhookConfigurations {
marshalutil.MarshallObject(entry, writer)
}
for _, entry := range strategy.validatingAdmissionPolicyBindings {
marshalutil.MarshallObject(entry, writer)
}
for _, entry := range strategy.validatingAdmissionPolicies {
marshalutil.MarshallObject(entry, writer)
}
for _, entry := range strategy.apiServices {
marshalutil.MarshallObject(entry, writer)
}
for _, entry := range strategy.deployments {
marshalutil.MarshallObject(entry, writer)
}
for _, entry := range strategy.daemonSets {
marshalutil.MarshallObject(entry, writer)
}
for _, entry := range strategy.sccs {
marshalutil.MarshallObject(entry, writer)
}
for _, entry := range strategy.serviceMonitors {
marshalutil.MarshallObject(entry, writer)
}
for _, entry := range strategy.prometheusRules {
marshalutil.MarshallObject(entry, writer)
}
for _, entry := range strategy.configMaps {
marshalutil.MarshallObject(entry, writer)
}
for _, entry := range strategy.routes {
marshalutil.MarshallObject(entry, writer)
}
for _, entry := range strategy.instancetypes {
marshalutil.MarshallObject(entry, writer)
}
for _, entry := range strategy.preferences {
marshalutil.MarshallObject(entry, writer)
}
writer.Flush()
return b.Bytes()
}
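// GenerateCurrentInstallStrategy renders all CRDs, RBAC objects, workloads,
// webhooks, certificates and monitoring resources for the given deployment
// config. When no monitoring namespace is found, the ServiceMonitor and
// PrometheusRule resources are skipped with a warning.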
func GenerateCurrentInstallStrategy(config *operatorutil.KubeVirtDeploymentConfig, monitorNamespace string, operatorNamespace string) (*Strategy, error) {
strategy := &Strategy{}
functions := []func() (*extv1.CustomResourceDefinition, error){
components.NewVirtualMachineInstanceCrd, components.NewPresetCrd, components.NewReplicaSetCrd,
components.NewVirtualMachineCrd, components.NewVirtualMachineInstanceMigrationCrd,
components.NewVirtualMachineSnapshotCrd, components.NewVirtualMachineSnapshotContentCrd,
components.NewVirtualMachineRestoreCrd, components.NewVirtualMachineInstancetypeCrd,
components.NewVirtualMachineClusterInstancetypeCrd, components.NewVirtualMachinePoolCrd,
components.NewMigrationPolicyCrd, components.NewVirtualMachinePreferenceCrd,
components.NewVirtualMachineClusterPreferenceCrd, components.NewVirtualMachineExportCrd,
components.NewVirtualMachineCloneCrd, components.NewVirtualMachineBackupCrd,
components.NewVirtualMachineBackupTrackerCrd,
}
for _, f := range functions {
crd, err := f()
if err != nil {
return nil, err
}
strategy.crds = append(strategy.crds, crd)
}
rbaclist := make([]runtime.Object, 0)
rbaclist = append(rbaclist, rbac.GetAllCluster()...)
rbaclist = append(rbaclist, rbac.GetAllApiServer(config.GetNamespace())...)
rbaclist = append(rbaclist, rbac.GetAllController(config.GetNamespace())...)
rbaclist = append(rbaclist, rbac.GetAllHandler(config.GetNamespace())...)
rbaclist = append(rbaclist, rbac.GetAllExportProxy(config.GetNamespace())...)
rbaclist = append(rbaclist, rbac.GetAllSynchronizationController(config.GetNamespace())...)
monitorServiceAccount := config.GetMonitorServiceAccountName()
isServiceAccountFound := monitorNamespace != ""
if isServiceAccountFound {
serviceMonitorNamespace := config.GetServiceMonitorNamespace()
if serviceMonitorNamespace == "" {
serviceMonitorNamespace = monitorNamespace
}
rbaclist = append(rbaclist, rbac.GetAllServiceMonitor(config.GetNamespace(), monitorNamespace, monitorServiceAccount)...)
strategy.serviceMonitors = append(strategy.serviceMonitors, components.NewServiceMonitorCR(config.GetNamespace(), serviceMonitorNamespace, true))
err := rules.SetupRules(config.GetNamespace())
if err != nil {
return nil, err
}
prometheusRule, err := rules.BuildPrometheusRule(config.GetNamespace())
if err != nil {
return nil, err
}
strategy.prometheusRules = append(strategy.prometheusRules, prometheusRule)
} else {
log.Log.Warningf("failed to create ServiceMonitor resources because couldn't find ServiceAccount %v in any monitoring namespaces : %v", monitorServiceAccount, strings.Join(config.GetPotentialMonitorNamespaces(), ", "))
}
for _, entry := range rbaclist {
cr, ok := entry.(*rbacv1.ClusterRole)
if ok {
strategy.clusterRoles = append(strategy.clusterRoles, cr)
}
crb, ok := entry.(*rbacv1.ClusterRoleBinding)
if ok {
strategy.clusterRoleBindings = append(strategy.clusterRoleBindings, crb)
}
r, ok := entry.(*rbacv1.Role)
if ok {
strategy.roles = append(strategy.roles, r)
}
rb, ok := entry.(*rbacv1.RoleBinding)
if ok {
strategy.roleBindings = append(strategy.roleBindings, rb)
}
sa, ok := entry.(*corev1.ServiceAccount)
if ok {
strategy.serviceAccounts = append(strategy.serviceAccounts, sa)
}
}
var productName string
var productVersion string
var productComponent string
invalidLabelPatternErrorMessage := "invalid %s: labels must be 63 characters or less, begin and end with an alphanumeric character, and contain only alphanumerics, dots, dashes and underscores"
if operatorutil.IsValidLabel(config.GetProductName()) {
productName = config.GetProductName()
} else {
log.Log.Errorf(invalidLabelPatternErrorMessage, "kubevirt.spec.productName")
}
if operatorutil.IsValidLabel(config.GetProductVersion()) {
productVersion = config.GetProductVersion()
} else {
log.Log.Errorf(invalidLabelPatternErrorMessage, "kubevirt.spec.productVersion")
}
if operatorutil.IsValidLabel(config.GetProductComponent()) {
productComponent = config.GetProductComponent()
} else {
log.Log.Errorf(invalidLabelPatternErrorMessage, "kubevirt.spec.productComponent")
}
strategy.validatingWebhookConfigurations = append(strategy.validatingWebhookConfigurations, components.NewOpertorValidatingWebhookConfiguration(operatorNamespace))
strategy.validatingWebhookConfigurations = append(strategy.validatingWebhookConfigurations, components.NewVirtAPIValidatingWebhookConfiguration(config.GetNamespace()))
strategy.mutatingWebhookConfigurations = append(strategy.mutatingWebhookConfigurations, components.NewVirtAPIMutatingWebhookConfiguration(config.GetNamespace()))
strategy.services = append(strategy.services, components.NewPrometheusService(config.GetNamespace()))
strategy.services = append(strategy.services, components.NewApiServerService(config.GetNamespace()))
strategy.services = append(strategy.services, components.NewOperatorWebhookService(operatorNamespace))
strategy.services = append(strategy.services, components.NewExportProxyService(config.GetNamespace()))
apiDeployment := components.NewApiServerDeployment(config.GetNamespace(), config.GetImageRegistry(), config.GetImagePrefix(), config.GetApiVersion(), productName, productVersion, productComponent, config.VirtApiImage, config.GetImagePullPolicy(), config.GetImagePullSecrets(), config.GetVerbosity(), config.GetExtraEnv())
strategy.deployments = append(strategy.deployments, apiDeployment)
controller := components.NewControllerDeployment(config.GetNamespace(), config.GetImageRegistry(), config.GetImagePrefix(), config.GetControllerVersion(), config.GetLauncherVersion(), config.GetExportServerVersion(), config.GetSidecarShimVersion(), productName, productVersion, productComponent, config.VirtControllerImage, config.VirtLauncherImage, config.VirtExportServerImage, config.SidecarShimImage, config.GetImagePullPolicy(), config.GetImagePullSecrets(), config.GetVerbosity(), config.GetExtraEnv())
strategy.deployments = append(strategy.deployments, controller)
exportProxyDeployment := components.NewExportProxyDeployment(config.GetNamespace(), config.GetImageRegistry(), config.GetImagePrefix(), config.GetExportProxyVersion(), productName, productVersion, productComponent, config.VirtExportProxyImage, config.GetImagePullPolicy(), config.GetImagePullSecrets(), config.GetVerbosity(), config.GetExtraEnv())
strategy.deployments = append(strategy.deployments, exportProxyDeployment)
synchronizationControllerDeployment := components.NewSynchronizationControllerDeployment(config.GetNamespace(), config.GetImageRegistry(), config.GetImagePrefix(), config.GetSynchronizationControllerVersion(), productName, productVersion, productComponent, config.VirtSynchronizationControllerImage, config.GetImagePullPolicy(), config.GetImagePullSecrets(), config.GetMigrationNetwork(), config.GetSynchronizationPort(), config.GetVerbosity(), config.GetExtraEnv())
strategy.deployments = append(strategy.deployments, synchronizationControllerDeployment)
handler := components.NewHandlerDaemonSet(config.GetNamespace(), config.GetImageRegistry(), config.GetImagePrefix(), config.GetHandlerVersion(), config.GetLauncherVersion(), config.GetPrHelperVersion(), config.GetSidecarShimVersion(), productName, productVersion, productComponent, config.VirtHandlerImage, config.VirtLauncherImage, config.PrHelperImage, config.SidecarShimImage, config.GetImagePullPolicy(), config.GetImagePullSecrets(), config.GetMigrationNetwork(), config.GetVerbosity(), config.GetExtraEnv(), config.PersistentReservationEnabled())
strategy.daemonSets = append(strategy.daemonSets, handler)
strategy.sccs = append(strategy.sccs, components.GetAllSCC(config.GetNamespace())...)
strategy.apiServices = components.NewVirtAPIAPIServices(config.GetNamespace())
strategy.certificateSecrets = components.NewCertSecrets(config.GetNamespace(), operatorNamespace)
strategy.certificateSecrets = append(strategy.certificateSecrets, components.NewCACertSecrets(operatorNamespace)...)
strategy.configMaps = append(strategy.configMaps, components.NewCAConfigMaps(operatorNamespace)...)
strategy.routes = append(strategy.routes, components.GetAllRoutes(operatorNamespace)...)
strategy.validatingAdmissionPolicyBindings = append(strategy.validatingAdmissionPolicyBindings, components.NewHandlerV1ValidatingAdmissionPolicyBinding())
virtHandlerServiceAccount := getVirtHandlerServiceAccount(config.GetNamespace())
strategy.validatingAdmissionPolicies = append(strategy.validatingAdmissionPolicies, components.NewHandlerV1ValidatingAdmissionPolicy(virtHandlerServiceAccount))
instancetypes, err := components.NewClusterInstancetypes()
if err != nil {
return nil, fmt.Errorf("error generating instancetypes for environment %v", err)
}
strategy.instancetypes = instancetypes
preferences, err := components.NewClusterPreferences()
if err != nil {
return nil, fmt.Errorf("error generating preferences for environment %v", err)
}
strategy.preferences = preferences
return strategy, nil
}
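// getVirtHandlerServiceAccount returns the fully qualified service account
// username ("system:serviceaccount:<namespace>:<name>") for virt-handler.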
func getVirtHandlerServiceAccount(namespace string) string {
prefix := fmt.Sprintf("system:serviceaccount:%s", namespace)
return fmt.Sprintf("%s:%s", prefix, components.HandlerServiceAccountName)
}
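// mostRecentConfigMap picks the configmap with the newest creation timestamp;
// it returns nil for an empty slice.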
func mostRecentConfigMap(configMaps []*corev1.ConfigMap) *corev1.ConfigMap {
var configMap *corev1.ConfigMap
// choose the most recent configmap if multiple match.
mostRecentTime := metav1.Time{}
for _, config := range configMaps {
if configMap == nil || mostRecentTime.Before(&config.ObjectMeta.CreationTimestamp) {
configMap = config
mostRecentTime = config.ObjectMeta.CreationTimestamp
}
}
return configMap
}
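// isEncoded reports whether the configmap declares an encoding for its
// manifests via the InstallStrategyConfigMapEncoding annotation.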
func isEncoded(configMap *corev1.ConfigMap) bool {
_, ok := configMap.Annotations[v1.InstallStrategyConfigMapEncoding]
return ok
}
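// getManifests extracts the raw manifests from an install strategy configmap,
// decoding them first if the configmap is marked as encoded.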
func getManifests(configMap *corev1.ConfigMap) (string, error) {
manifests, ok := configMap.Data["manifests"]
if !ok {
return "", fmt.Errorf("install strategy configmap %s does not contain 'manifests' key", configMap.Name)
}
if isEncoded(configMap) {
var err error
manifests, err = decodeManifests([]byte(manifests))
if err != nil {
return "", err
}
}
return manifests, nil
}
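// LoadInstallStrategyFromCache searches the informer cache for an install
// strategy configmap matching the deployment ID (or, for backwards
// compatibility, the version and registry) and parses its manifests.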
func LoadInstallStrategyFromCache(stores operatorutil.Stores, config *operatorutil.KubeVirtDeploymentConfig) (*Strategy, error) {
var matchingConfigMaps []*corev1.ConfigMap
for _, obj := range stores.InstallStrategyConfigMapCache.List() {
cm, ok := obj.(*corev1.ConfigMap)
if !ok {
continue
} else if cm.ObjectMeta.Annotations == nil {
continue
} else if cm.ObjectMeta.Namespace != config.GetNamespace() {
continue
}
// deprecated, kept for backwards compatibility
version := cm.ObjectMeta.Annotations[v1.InstallStrategyVersionAnnotation]
// deprecated, kept for backwards compatibility
registry := cm.ObjectMeta.Annotations[v1.InstallStrategyRegistryAnnotation]
id := cm.ObjectMeta.Annotations[v1.InstallStrategyIdentifierAnnotation]
if id == config.GetDeploymentID() ||
(id == "" && version == config.GetKubeVirtVersion() && registry == config.GetImageRegistry()) {
matchingConfigMaps = append(matchingConfigMaps, cm)
}
}
if len(matchingConfigMaps) == 0 {
return nil, fmt.Errorf("no install strategy configmap found for version %s with registry %s", config.GetKubeVirtVersion(), config.GetImageRegistry())
}
manifests, err := getManifests(mostRecentConfigMap(matchingConfigMaps))
if err != nil {
return nil, err
}
strategy, err := loadInstallStrategyFromBytes(manifests)
if err != nil {
return nil, err
}
return strategy, nil
}
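// loadInstallStrategyFromBytes parses a multi-document YAML stream (as produced
// by dumpInstallStrategyToBytes) back into a Strategy, failing on any document
// whose kind is not recognized.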
func loadInstallStrategyFromBytes(data string) (*Strategy, error) {
strategy := &Strategy{}
entries := strings.Split(data, "---")
for _, entry := range entries {
entry := strings.TrimSpace(entry)
if entry == "" {
continue
}
var obj metav1.TypeMeta
if err := yaml.Unmarshal([]byte(entry), &obj); err != nil {
return nil, err
}
switch obj.Kind {
case "ValidatingWebhookConfiguration":
webhook := &admissionregistrationv1.ValidatingWebhookConfiguration{}
if err := yaml.Unmarshal([]byte(entry), &webhook); err != nil {
return nil, err
}
webhook.TypeMeta = obj
strategy.validatingWebhookConfigurations = append(strategy.validatingWebhookConfigurations, webhook)
case "MutatingWebhookConfiguration":
webhook := &admissionregistrationv1.MutatingWebhookConfiguration{}
if err := yaml.Unmarshal([]byte(entry), &webhook); err != nil {
return nil, err
}
webhook.TypeMeta = obj
strategy.mutatingWebhookConfigurations = append(strategy.mutatingWebhookConfigurations, webhook)
case "ValidatingAdmissionPolicyBinding":
validatingAdmissionPolicyBinding := &admissionregistrationv1.ValidatingAdmissionPolicyBinding{}
if err := yaml.Unmarshal([]byte(entry), &validatingAdmissionPolicyBinding); err != nil {
return nil, err
}
validatingAdmissionPolicyBinding.TypeMeta = obj
strategy.validatingAdmissionPolicyBindings = append(strategy.validatingAdmissionPolicyBindings, validatingAdmissionPolicyBinding)
case "ValidatingAdmissionPolicy":
validatingAdmissionPolicy := &admissionregistrationv1.ValidatingAdmissionPolicy{}
if err := yaml.Unmarshal([]byte(entry), &validatingAdmissionPolicy); err != nil {
return nil, err
}
validatingAdmissionPolicy.TypeMeta = obj
strategy.validatingAdmissionPolicies = append(strategy.validatingAdmissionPolicies, validatingAdmissionPolicy)
case "APIService":
apiService := &apiregv1.APIService{}
if err := yaml.Unmarshal([]byte(entry), &apiService); err != nil {
return nil, err
}
strategy.apiServices = append(strategy.apiServices, apiService)
case "Secret":
secret := &corev1.Secret{}
if err := yaml.Unmarshal([]byte(entry), &secret); err != nil {
return nil, err
}
strategy.certificateSecrets = append(strategy.certificateSecrets, secret)
case "ServiceAccount":
sa := &corev1.ServiceAccount{}
if err := yaml.Unmarshal([]byte(entry), &sa); err != nil {
return nil, err
}
strategy.serviceAccounts = append(strategy.serviceAccounts, sa)
case "ClusterRole":
cr := &rbacv1.ClusterRole{}
if err := yaml.Unmarshal([]byte(entry), &cr); err != nil {
return nil, err
}
strategy.clusterRoles = append(strategy.clusterRoles, cr)
case "ClusterRoleBinding":
crb := &rbacv1.ClusterRoleBinding{}
if err := yaml.Unmarshal([]byte(entry), &crb); err != nil {
return nil, err
}
strategy.clusterRoleBindings = append(strategy.clusterRoleBindings, crb)
case "Role":
r := &rbacv1.Role{}
if err := yaml.Unmarshal([]byte(entry), &r); err != nil {
return nil, err
}
strategy.roles = append(strategy.roles, r)
case "RoleBinding":
rb := &rbacv1.RoleBinding{}
if err := yaml.Unmarshal([]byte(entry), &rb); err != nil {
return nil, err
}
strategy.roleBindings = append(strategy.roleBindings, rb)
case "Service":
s := &corev1.Service{}
if err := yaml.Unmarshal([]byte(entry), &s); err != nil {
return nil, err
}
strategy.services = append(strategy.services, s)
case "Deployment":
d := &appsv1.Deployment{}
if err := yaml.Unmarshal([]byte(entry), &d); err != nil {
return nil, err
}
strategy.deployments = append(strategy.deployments, d)
case "DaemonSet":
d := &appsv1.DaemonSet{}
if err := yaml.Unmarshal([]byte(entry), &d); err != nil {
return nil, err
}
strategy.daemonSets = append(strategy.daemonSets, d)
case "CustomResourceDefinition":
crdv1 := &extv1.CustomResourceDefinition{}
switch obj.APIVersion {
case extv1beta1.SchemeGroupVersion.String():
crd := &ext.CustomResourceDefinition{}
crdv1beta1 := &extv1beta1.CustomResourceDefinition{}
if err := yaml.Unmarshal([]byte(entry), &crdv1beta1); err != nil {
return nil, err
}
err := extv1beta1.Convert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(crdv1beta1, crd, nil)
if err != nil {
return nil, err
}
err = extv1.Convert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(crd, crdv1, nil)
if err != nil {
return nil, err
}
case extv1.SchemeGroupVersion.String():
if err := yaml.Unmarshal([]byte(entry), &crdv1); err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("crd ApiVersion %s not supported", obj.APIVersion)
}
strategy.crds = append(strategy.crds, crdv1)
case "SecurityContextConstraints":
s := &secv1.SecurityContextConstraints{}
if err := yaml.Unmarshal([]byte(entry), &s); err != nil {
return nil, err
}
strategy.sccs = append(strategy.sccs, s)
case "ServiceMonitor":
sm := &promv1.ServiceMonitor{}
if err := yaml.Unmarshal([]byte(entry), &sm); err != nil {
return nil, err
}
strategy.serviceMonitors = append(strategy.serviceMonitors, sm)
case "PrometheusRule":
pr := &promv1.PrometheusRule{}
if err := yaml.Unmarshal([]byte(entry), &pr); err != nil {
return nil, err
}
strategy.prometheusRules = append(strategy.prometheusRules, pr)
case "ConfigMap":
configMap := &corev1.ConfigMap{}
if err := yaml.Unmarshal([]byte(entry), &configMap); err != nil {
return nil, err
}
strategy.configMaps = append(strategy.configMaps, configMap)
case "Route":
route := &routev1.Route{}
if err := yaml.Unmarshal([]byte(entry), &route); err != nil {
return nil, err
}
strategy.routes = append(strategy.routes, route)
case "VirtualMachineClusterInstancetype":
instancetype := &instancetypev1beta1.VirtualMachineClusterInstancetype{}
if err := yaml.Unmarshal([]byte(entry), &instancetype); err != nil {
return nil, err
}
strategy.instancetypes = append(strategy.instancetypes, instancetype)
case "VirtualMachineClusterPreference":
preference := &instancetypev1beta1.VirtualMachineClusterPreference{}
if err := yaml.Unmarshal([]byte(entry), &preference); err != nil {
return nil, err
}
strategy.preferences = append(strategy.preferences, preference)
default:
return nil, fmt.Errorf("UNKNOWN TYPE %s detected", obj.Kind)
}
log.Log.Infof("%s loaded", obj.Kind)
}
return strategy, nil
}
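// isNamespaceExist reports whether the given namespace exists, distinguishing
// a NotFound response from other API errors.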
func isNamespaceExist(clientset k8coresv1.CoreV1Interface, ns string) (bool, error) {
_, err := clientset.Namespaces().Get(context.Background(), ns, metav1.GetOptions{})
if err == nil {
return true, nil
}
if errors.IsNotFound(err) {
return false, nil
}
return false, err
}
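// isServiceAccountExist reports whether the given ServiceAccount exists in the
// namespace, distinguishing a NotFound response from other API errors.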
func isServiceAccountExist(clientset k8coresv1.CoreV1Interface, ns string, serviceAccount string) (bool, error) {
_, err := clientset.ServiceAccounts(ns).Get(context.Background(), serviceAccount, metav1.GetOptions{})
if err == nil {
return true, nil
}
if errors.IsNotFound(err) {
return false, nil
}
return false, err
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package rbac
import (
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"kubevirt.io/api/instancetype"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/components"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/api/migrations"
)
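// Note: despite their names, VersionName and VersionNamev1 hold the RBAC API
// group and its group/version string; GroupName is the core kubevirt.io API group.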
const (
VersionName = "rbac.authorization.k8s.io"
VersionNamev1 = "rbac.authorization.k8s.io/v1"
GroupName = "kubevirt.io"
)
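// GetAllApiServer returns the ServiceAccount plus the cluster- and
// namespace-scoped RBAC objects required by virt-api.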
func GetAllApiServer(namespace string) []runtime.Object {
return []runtime.Object{
newApiServerServiceAccount(namespace),
newApiServerClusterRole(),
newApiServerClusterRoleBinding(namespace),
newApiServerAuthDelegatorClusterRoleBinding(namespace),
newApiServerRole(namespace),
newApiServerRoleBinding(namespace),
}
}
func newApiServerServiceAccount(namespace string) *corev1.ServiceAccount {
return &corev1.ServiceAccount{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: components.ApiServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
}
}
func newApiServerClusterRole() *rbacv1.ClusterRole {
return &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "ClusterRole",
},
ObjectMeta: metav1.ObjectMeta{
Name: components.ApiServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{
"",
},
Resources: []string{
"pods",
},
Verbs: []string{
"get", "list", "delete", "patch",
},
},
{
APIGroups: []string{
GroupName,
},
Resources: []string{
"virtualmachines",
"virtualmachineinstances",
},
Verbs: []string{
"get", "list", "watch", "patch", "update",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"persistentvolumeclaims",
},
Verbs: []string{
"get",
"list",
},
},
{
APIGroups: []string{
GroupName,
},
Resources: []string{
"virtualmachines/status",
},
Verbs: []string{
"patch",
},
},
{
APIGroups: []string{
GroupName,
},
Resources: []string{
"virtualmachineinstancemigrations",
},
Verbs: []string{
"create", "get", "list", "watch", "patch",
},
},
{
APIGroups: []string{
GroupName,
},
Resources: []string{
"virtualmachineinstancepresets",
},
Verbs: []string{
"watch", "list",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"configmaps",
},
Verbs: []string{
"get", "list", "watch",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"limitranges",
},
Verbs: []string{
"watch", "list",
},
},
{
APIGroups: []string{
"apiextensions.k8s.io",
},
Resources: []string{
"customresourcedefinitions",
},
Verbs: []string{
"get",
"list",
"watch",
},
},
{
APIGroups: []string{
GroupName,
},
Resources: []string{
"kubevirts",
},
Verbs: []string{
"get",
"list",
"watch",
},
},
{
APIGroups: []string{
"snapshot.kubevirt.io",
},
Resources: []string{
"virtualmachinesnapshots",
"virtualmachinerestores",
"virtualmachinesnapshotcontents",
},
Verbs: []string{
"get", "list", "watch",
},
},
{
APIGroups: []string{
"backup.kubevirt.io",
},
Resources: []string{
"virtualmachinebackups",
},
Verbs: []string{
"get", "list", "watch",
},
},
{
APIGroups: []string{
"backup.kubevirt.io",
},
Resources: []string{
"virtualmachinebackuptrackers",
},
Verbs: []string{
"get", "list", "watch",
},
},
{
APIGroups: []string{
"cdi.kubevirt.io",
},
Resources: []string{
"datasources",
"datavolumes",
},
Verbs: []string{
"get", "list", "watch",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"namespaces",
},
Verbs: []string{
"get",
"list",
"watch",
},
},
{
APIGroups: []string{
"instancetype.kubevirt.io",
},
Resources: []string{
instancetype.PluralResourceName,
instancetype.ClusterPluralResourceName,
instancetype.PluralPreferenceResourceName,
instancetype.ClusterPluralPreferenceResourceName,
},
Verbs: []string{
"get", "list", "watch",
},
},
{
APIGroups: []string{
migrations.GroupName,
},
Resources: []string{
migrations.ResourceMigrationPolicies,
},
Verbs: []string{
"get", "list", "watch",
},
},
{
APIGroups: []string{
"apps",
},
Resources: []string{
"controllerrevisions",
},
Verbs: []string{
"create",
"list",
"get",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"nodes",
},
Verbs: []string{
"get",
},
},
},
}
}
func newApiServerClusterRoleBinding(namespace string) *rbacv1.ClusterRoleBinding {
return &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: components.ApiServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: VersionName,
Kind: "ClusterRole",
Name: components.ApiServiceAccountName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: namespace,
Name: components.ApiServiceAccountName,
},
},
}
}
func newApiServerAuthDelegatorClusterRoleBinding(namespace string) *rbacv1.ClusterRoleBinding {
return &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: "kubevirt-apiserver-auth-delegator",
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: VersionName,
Kind: "ClusterRole",
Name: "system:auth-delegator",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: namespace,
Name: components.ApiServiceAccountName,
},
},
}
}
func newApiServerRole(namespace string) *rbacv1.Role {
return &rbacv1.Role{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "Role",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: components.ApiServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{
"",
},
Resources: []string{
"configmaps",
},
Verbs: []string{
"get", "list", "watch",
},
},
},
}
}
func newApiServerRoleBinding(namespace string) *rbacv1.RoleBinding {
return &rbacv1.RoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "RoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: components.ApiServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: VersionName,
Kind: "Role",
Name: components.ApiServiceAccountName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: namespace,
Name: components.ApiServiceAccountName,
},
},
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package rbac
import (
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"kubevirt.io/api/backup"
"kubevirt.io/api/clone"
"kubevirt.io/api/export"
"kubevirt.io/api/pool"
"kubevirt.io/api/snapshot"
"kubevirt.io/api/instancetype"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/api/migrations"
)
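// Names of the cluster roles below and of the KubeVirt resource and
// subresource paths referenced by their rules.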
const (
defaultClusterRoleName = "kubevirt.io:default"
instancetypeViewClusterRoleName = "instancetype.kubevirt.io:view"
apiVersion = "version"
apiGuestFs = "guestfs"
apiExpandVmSpec = "expand-vm-spec"
apiKubevirts = "kubevirts"
apiVM = "virtualmachines"
apiVMInstances = "virtualmachineinstances"
apiVMIPresets = "virtualmachineinstancepresets"
apiVMIReplicasets = "virtualmachineinstancereplicasets"
apiVMIMigrations = "virtualmachineinstancemigrations"
apiVMSnapshots = "virtualmachinesnapshots"
apiVMSnapshotContents = "virtualmachinesnapshotcontents"
apiVMBackups = "virtualmachinebackups"
apiVMBackupTrackers = "virtualmachinebackuptrackers"
apiVMRestores = "virtualmachinerestores"
apiVMExports = "virtualmachineexports"
apiVMClones = "virtualmachineclones"
apiVMPools = "virtualmachinepools"
apiVMExpandSpec = "virtualmachines/expand-spec"
apiVMPortForward = "virtualmachines/portforward"
apiVMStart = "virtualmachines/start"
apiVMStop = "virtualmachines/stop"
apiVMRestart = "virtualmachines/restart"
apiVMAddVolume = "virtualmachines/addvolume"
apiVMRemoveVolume = "virtualmachines/removevolume"
apiVMMigrate = "virtualmachines/migrate"
apiVMMemoryDump = "virtualmachines/memorydump"
apiVMObjectGraph = "virtualmachines/objectgraph"
apiVMEvacuateCancel = "virtualmachines/evacuate/cancel"
apiVMInstancesConsole = "virtualmachineinstances/console"
apiVMInstancesVNC = "virtualmachineinstances/vnc"
apiVMInstancesVNCScreenshot = "virtualmachineinstances/vnc/screenshot"
apiVMInstancesPortForward = "virtualmachineinstances/portforward"
apiVMInstancesPause = "virtualmachineinstances/pause"
apiVMInstancesUnpause = "virtualmachineinstances/unpause"
apiVMInstancesAddVolume = "virtualmachineinstances/addvolume"
apiVMInstancesRemoveVolume = "virtualmachineinstances/removevolume"
apiVMInstancesFreeze = "virtualmachineinstances/freeze"
apiVMInstancesUnfreeze = "virtualmachineinstances/unfreeze"
apiVMInstancesSoftReboot = "virtualmachineinstances/softreboot"
apiVMInstancesReset = "virtualmachineinstances/reset"
apiVMInstancesGuestOSInfo = "virtualmachineinstances/guestosinfo"
apiVMInstancesFileSysList = "virtualmachineinstances/filesystemlist"
apiVMInstancesUserList = "virtualmachineinstances/userlist"
apiVMInstancesSEVFetchCertChain = "virtualmachineinstances/sev/fetchcertchain"
apiVMInstancesSEVQueryLaunchMeasurement = "virtualmachineinstances/sev/querylaunchmeasurement"
apiVMInstancesSEVSetupSession = "virtualmachineinstances/sev/setupsession"
apiVMInstancesSEVInjectLaunchSecret = "virtualmachineinstances/sev/injectlaunchsecret"
apiVMInstancesUSBRedir = "virtualmachineinstances/usbredir"
apiVMInstancesObjectGraph = "virtualmachineinstances/objectgraph"
apiVMInstancesEvacuateCancel = "virtualmachineinstances/evacuate/cancel"
)
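// GetAllCluster returns the cluster-wide RBAC objects: the default role and
// binding for authenticated users, the admin/edit/view aggregation roles, the
// instancetype view role and binding, and the migrate role.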
func GetAllCluster() []runtime.Object {
return []runtime.Object{
newDefaultClusterRole(),
newDefaultClusterRoleBinding(),
newAdminClusterRole(),
newEditClusterRole(),
newViewClusterRole(),
newInstancetypeViewClusterRole(),
newInstancetypeViewClusterRoleBinding(),
newMigrateClusterRole(),
}
}
func newDefaultClusterRole() *rbacv1.ClusterRole {
return &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "ClusterRole",
},
ObjectMeta: metav1.ObjectMeta{
Name: defaultClusterRoleName,
Labels: map[string]string{
virtv1.AppLabel: "",
"kubernetes.io/bootstrapping": "rbac-defaults",
},
Annotations: map[string]string{
"rbac.authorization.kubernetes.io/autoupdate": "true",
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{
GroupName,
},
Resources: []string{
apiKubevirts,
},
Verbs: []string{
"get", "list",
},
},
{
APIGroups: []string{
virtv1.SubresourceGroupName,
},
Resources: []string{
apiVersion,
apiGuestFs,
},
Verbs: []string{
"get", "list",
},
},
},
}
}
func newDefaultClusterRoleBinding() *rbacv1.ClusterRoleBinding {
return &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: defaultClusterRoleName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
Annotations: map[string]string{
"rbac.authorization.kubernetes.io/autoupdate": "true",
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: VersionName,
Kind: "ClusterRole",
Name: defaultClusterRoleName,
},
Subjects: []rbacv1.Subject{
{
Kind: "Group",
APIGroup: VersionName,
Name: "system:authenticated",
},
},
}
}
func newAdminClusterRole() *rbacv1.ClusterRole {
return &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "ClusterRole",
},
ObjectMeta: metav1.ObjectMeta{
Name: "kubevirt.io:admin",
Labels: map[string]string{
virtv1.AppLabel: "",
"rbac.authorization.k8s.io/aggregate-to-admin": "true",
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{
virtv1.SubresourceGroupName,
},
Resources: []string{
apiVMInstancesConsole,
apiVMInstancesVNC,
apiVMInstancesVNCScreenshot,
apiVMInstancesPortForward,
apiVMInstancesGuestOSInfo,
apiVMInstancesFileSysList,
apiVMInstancesUserList,
apiVMInstancesSEVFetchCertChain,
apiVMInstancesSEVQueryLaunchMeasurement,
apiVMInstancesUSBRedir,
apiVMObjectGraph,
apiVMInstancesObjectGraph,
},
Verbs: []string{
"get",
},
},
{
APIGroups: []string{
virtv1.SubresourceGroupName,
},
Resources: []string{
apiVMInstancesPause,
apiVMInstancesUnpause,
apiVMInstancesAddVolume,
apiVMInstancesRemoveVolume,
apiVMInstancesFreeze,
apiVMInstancesUnfreeze,
apiVMInstancesSoftReboot,
apiVMInstancesReset,
apiVMInstancesSEVSetupSession,
apiVMInstancesSEVInjectLaunchSecret,
apiVMInstancesEvacuateCancel,
},
Verbs: []string{
"update",
},
},
{
APIGroups: []string{
virtv1.SubresourceGroupName,
},
Resources: []string{
apiVMExpandSpec,
apiVMPortForward,
},
Verbs: []string{
"get",
},
},
{
APIGroups: []string{
virtv1.SubresourceGroupName,
},
Resources: []string{
apiVMStart,
apiVMStop,
apiVMRestart,
apiVMAddVolume,
apiVMRemoveVolume,
apiVMMemoryDump,
apiVMEvacuateCancel,
},
Verbs: []string{
"update",
},
},
{
APIGroups: []string{
virtv1.SubresourceGroupName,
},
Resources: []string{
apiExpandVmSpec,
},
Verbs: []string{
"update",
},
},
{
APIGroups: []string{
GroupName,
},
Resources: []string{
apiVM,
apiVMInstances,
apiVMIPresets,
apiVMIReplicasets,
},
Verbs: []string{
"get", "delete", "create", "update", "patch", "list", "watch", "deletecollection",
},
},
{
APIGroups: []string{
GroupName,
},
Resources: []string{
apiVMIMigrations,
},
Verbs: []string{
"get", "list", "watch",
},
},
{
APIGroups: []string{
snapshot.GroupName,
},
Resources: []string{
apiVMSnapshots,
apiVMSnapshotContents,
apiVMRestores,
},
Verbs: []string{
"get", "delete", "create", "update", "patch", "list", "watch", "deletecollection",
},
},
{
APIGroups: []string{
backup.GroupName,
},
Resources: []string{
apiVMBackups,
},
Verbs: []string{
"get", "delete", "create", "update", "patch", "list", "watch", "deletecollection",
},
},
{
APIGroups: []string{
backup.GroupName,
},
Resources: []string{
apiVMBackupTrackers,
apiVMBackupTrackers + "/status",
},
Verbs: []string{
"get", "list", "watch", "create", "update", "patch",
},
},
{
APIGroups: []string{
export.GroupName,
},
Resources: []string{
apiVMExports,
},
Verbs: []string{
"get", "delete", "create", "update", "patch", "list", "watch", "deletecollection",
},
},
{
APIGroups: []string{
clone.GroupName,
},
Resources: []string{
apiVMClones,
},
Verbs: []string{
"get", "delete", "create", "update", "patch", "list", "watch", "deletecollection",
},
},
{
APIGroups: []string{
instancetype.GroupName,
},
Resources: []string{
instancetype.PluralResourceName,
instancetype.ClusterPluralResourceName,
instancetype.PluralPreferenceResourceName,
instancetype.ClusterPluralPreferenceResourceName,
},
Verbs: []string{
"get", "delete", "create", "update", "patch", "list", "watch", "deletecollection",
},
},
{
APIGroups: []string{
pool.GroupName,
},
Resources: []string{
apiVMPools,
},
Verbs: []string{
"get", "delete", "create", "update", "patch", "list", "watch", "deletecollection",
},
},
{
APIGroups: []string{
migrations.GroupName,
},
Resources: []string{
migrations.ResourceMigrationPolicies,
},
Verbs: []string{
"get", "list", "watch",
},
},
},
}
}
func newEditClusterRole() *rbacv1.ClusterRole {
return &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "ClusterRole",
},
ObjectMeta: metav1.ObjectMeta{
Name: "kubevirt.io:edit",
Labels: map[string]string{
virtv1.AppLabel: "",
"rbac.authorization.k8s.io/aggregate-to-edit": "true",
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{
virtv1.SubresourceGroupName,
},
Resources: []string{
apiVMInstancesConsole,
apiVMInstancesVNC,
apiVMInstancesVNCScreenshot,
apiVMInstancesPortForward,
apiVMInstancesGuestOSInfo,
apiVMInstancesFileSysList,
apiVMInstancesUserList,
apiVMInstancesSEVFetchCertChain,
apiVMInstancesSEVQueryLaunchMeasurement,
apiVMInstancesUSBRedir,
apiVMObjectGraph,
apiVMInstancesObjectGraph,
},
Verbs: []string{
"get",
},
},
{
APIGroups: []string{
virtv1.SubresourceGroupName,
},
Resources: []string{
apiVMInstancesPause,
apiVMInstancesUnpause,
apiVMInstancesAddVolume,
apiVMInstancesRemoveVolume,
apiVMInstancesFreeze,
apiVMInstancesUnfreeze,
apiVMInstancesSoftReboot,
apiVMInstancesReset,
apiVMInstancesSEVSetupSession,
apiVMInstancesSEVInjectLaunchSecret,
apiVMInstancesEvacuateCancel,
},
Verbs: []string{
"update",
},
},
{
APIGroups: []string{
virtv1.SubresourceGroupName,
},
Resources: []string{
apiVMExpandSpec,
apiVMPortForward,
},
Verbs: []string{
"get",
},
},
{
APIGroups: []string{
virtv1.SubresourceGroupName,
},
Resources: []string{
apiVMStart,
apiVMStop,
apiVMRestart,
apiVMAddVolume,
apiVMRemoveVolume,
apiVMMemoryDump,
apiVMEvacuateCancel,
},
Verbs: []string{
"update",
},
},
{
APIGroups: []string{
virtv1.SubresourceGroupName,
},
Resources: []string{
apiExpandVmSpec,
},
Verbs: []string{
"update",
},
},
{
APIGroups: []string{
GroupName,
},
Resources: []string{
apiVM,
apiVMInstances,
apiVMIPresets,
apiVMIReplicasets,
},
Verbs: []string{
"get", "delete", "create", "update", "patch", "list", "watch",
},
},
{
APIGroups: []string{
GroupName,
},
Resources: []string{
apiVMIMigrations,
},
Verbs: []string{
"get", "list", "watch",
},
},
{
APIGroups: []string{
snapshot.GroupName,
},
Resources: []string{
apiVMSnapshots,
apiVMSnapshotContents,
apiVMRestores,
},
Verbs: []string{
"get", "delete", "create", "update", "patch", "list", "watch",
},
},
{
APIGroups: []string{
backup.GroupName,
},
Resources: []string{
apiVMBackups,
},
Verbs: []string{
"get", "delete", "create", "update", "patch", "list", "watch",
},
},
{
APIGroups: []string{
export.GroupName,
},
Resources: []string{
apiVMExports,
},
Verbs: []string{
"get", "delete", "create", "update", "patch", "list", "watch",
},
},
{
APIGroups: []string{
clone.GroupName,
},
Resources: []string{
apiVMClones,
},
Verbs: []string{
"get", "delete", "create", "update", "patch", "list", "watch",
},
},
{
APIGroups: []string{
instancetype.GroupName,
},
Resources: []string{
instancetype.PluralResourceName,
instancetype.ClusterPluralResourceName,
instancetype.PluralPreferenceResourceName,
instancetype.ClusterPluralPreferenceResourceName,
},
Verbs: []string{
"get", "delete", "create", "update", "patch", "list", "watch",
},
},
{
APIGroups: []string{
pool.GroupName,
},
Resources: []string{
apiVMPools,
},
Verbs: []string{
"get", "delete", "create", "update", "patch", "list", "watch",
},
},
{
APIGroups: []string{
GroupName,
},
Resources: []string{
apiKubevirts,
},
Verbs: []string{
"get", "list",
},
},
{
APIGroups: []string{
migrations.GroupName,
},
Resources: []string{
migrations.ResourceMigrationPolicies,
},
Verbs: []string{
"get", "list", "watch",
},
},
},
}
}
func newMigrateClusterRole() *rbacv1.ClusterRole {
return &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "ClusterRole",
},
ObjectMeta: metav1.ObjectMeta{
Name: "kubevirt.io:migrate",
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{
virtv1.SubresourceGroupName,
},
Resources: []string{
apiVMMigrate,
},
Verbs: []string{
"update",
},
},
{
APIGroups: []string{
GroupName,
},
Resources: []string{
apiVMIMigrations,
},
Verbs: []string{
"get", "delete", "create", "update", "patch", "list", "watch", "deletecollection",
},
},
},
}
}
func newViewClusterRole() *rbacv1.ClusterRole {
return &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "ClusterRole",
},
ObjectMeta: metav1.ObjectMeta{
Name: "kubevirt.io:view",
Labels: map[string]string{
virtv1.AppLabel: "",
"rbac.authorization.k8s.io/aggregate-to-view": "true",
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{
GroupName,
},
Resources: []string{
apiKubevirts,
},
Verbs: []string{
"get", "list",
},
},
{
APIGroups: []string{
virtv1.SubresourceGroupName,
},
Resources: []string{
apiVMExpandSpec,
apiVMInstancesGuestOSInfo,
apiVMInstancesFileSysList,
apiVMInstancesUserList,
apiVMInstancesSEVFetchCertChain,
apiVMInstancesSEVQueryLaunchMeasurement,
apiVMObjectGraph,
apiVMInstancesObjectGraph,
},
Verbs: []string{
"get",
},
},
{
APIGroups: []string{
virtv1.SubresourceGroupName,
},
Resources: []string{
apiExpandVmSpec,
},
Verbs: []string{
"update",
},
},
{
APIGroups: []string{
GroupName,
},
Resources: []string{
apiVM,
apiVMInstances,
apiVMIPresets,
apiVMIReplicasets,
apiVMIMigrations,
},
Verbs: []string{
"get", "list", "watch",
},
},
{
APIGroups: []string{
snapshot.GroupName,
},
Resources: []string{
apiVMSnapshots,
apiVMSnapshotContents,
apiVMRestores,
},
Verbs: []string{
"get", "list", "watch",
},
},
{
APIGroups: []string{
backup.GroupName,
},
Resources: []string{
apiVMBackups,
},
Verbs: []string{
"get", "list", "watch",
},
},
{
APIGroups: []string{
export.GroupName,
},
Resources: []string{
apiVMExports,
},
Verbs: []string{
"get", "list", "watch",
},
},
{
APIGroups: []string{
clone.GroupName,
},
Resources: []string{
apiVMClones,
},
Verbs: []string{
"get", "list", "watch",
},
},
{
APIGroups: []string{
instancetype.GroupName,
},
Resources: []string{
instancetype.PluralResourceName,
instancetype.ClusterPluralResourceName,
instancetype.PluralPreferenceResourceName,
instancetype.ClusterPluralPreferenceResourceName,
},
Verbs: []string{
"get", "list", "watch",
},
},
{
APIGroups: []string{
pool.GroupName,
},
Resources: []string{
apiVMPools,
},
Verbs: []string{
"get", "list", "watch",
},
},
{
APIGroups: []string{
migrations.GroupName,
},
Resources: []string{
migrations.ResourceMigrationPolicies,
},
Verbs: []string{
"get", "list", "watch",
},
},
},
}
}
func newInstancetypeViewClusterRole() *rbacv1.ClusterRole {
return &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "ClusterRole",
},
ObjectMeta: metav1.ObjectMeta{
Name: instancetypeViewClusterRoleName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{
instancetype.GroupName,
},
Resources: []string{
instancetype.ClusterPluralResourceName,
instancetype.ClusterPluralPreferenceResourceName,
},
Verbs: []string{
"get", "list", "watch",
},
},
},
}
}
func newInstancetypeViewClusterRoleBinding() *rbacv1.ClusterRoleBinding {
return &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: instancetypeViewClusterRoleName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
Annotations: map[string]string{
"rbac.authorization.kubernetes.io/autoupdate": "true",
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: VersionName,
Kind: "ClusterRole",
Name: instancetypeViewClusterRoleName,
},
Subjects: []rbacv1.Subject{
{
// Grants every authenticated user read access to the cluster-wide
// instancetypes and preferences via the ClusterRole above.
Kind: "Group",
APIGroup: VersionName,
Name: "system:authenticated",
},
},
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package rbac
import (
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"kubevirt.io/api/clone"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/components"
"kubevirt.io/api/instancetype"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/api/migrations"
)
func GetAllController(namespace string) []runtime.Object {
return []runtime.Object{
newControllerServiceAccount(namespace),
newControllerClusterRole(),
newControllerClusterRoleBinding(namespace),
newControllerRole(namespace),
newControllerRoleBinding(namespace),
}
}
func newControllerServiceAccount(namespace string) *corev1.ServiceAccount {
return &corev1.ServiceAccount{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: components.ControllerServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
}
}
func newControllerRole(namespace string) *rbacv1.Role {
return &rbacv1.Role{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "Role",
},
ObjectMeta: metav1.ObjectMeta{
Name: components.ControllerServiceAccountName,
Namespace: namespace,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{
GroupNameRoute,
},
Resources: []string{
"routes",
},
Verbs: []string{
"list",
"get",
"watch",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"secrets",
},
Verbs: []string{
"list",
"get",
"watch",
},
},
{
APIGroups: []string{
"networking.k8s.io",
},
Resources: []string{
"ingresses",
},
Verbs: []string{
"list",
"get",
"watch",
},
},
{
APIGroups: []string{
"coordination.k8s.io",
},
Resources: []string{
"leases",
},
Verbs: []string{
"get", "list", "watch", "delete", "update", "create", "patch",
},
},
},
}
}
func newControllerRoleBinding(namespace string) *rbacv1.RoleBinding {
return &rbacv1.RoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "RoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: components.ControllerServiceAccountName,
Namespace: namespace,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: VersionName,
Kind: "Role",
Name: components.ControllerServiceAccountName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: namespace,
Name: components.ControllerServiceAccountName,
},
},
}
}
func newControllerClusterRole() *rbacv1.ClusterRole {
return &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: metav1.ObjectMeta{
Name: components.ControllerServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{
"",
},
Resources: []string{
"namespaces",
},
Verbs: []string{
"get",
"list",
"watch",
"patch",
},
},
{
APIGroups: []string{
"policy",
},
Resources: []string{
"poddisruptionbudgets",
},
Verbs: []string{
"get", "list", "watch", "delete", "create", "patch",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"pods", "configmaps", "endpoints", "services",
},
Verbs: []string{
"get", "list", "watch", "delete", "update", "create", "patch",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"events",
},
Verbs: []string{
"update", "create", "patch",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"secrets",
},
Verbs: []string{
"create",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"pods/finalizers",
},
Verbs: []string{
"update",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"pods/eviction",
},
Verbs: []string{
"create",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"pods/status",
},
Verbs: []string{
"patch",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"nodes",
},
Verbs: []string{
"get", "list", "watch", "update", "patch",
},
},
{
APIGroups: []string{
"apps",
},
Resources: []string{
"daemonsets",
},
Verbs: []string{
"list",
},
},
{
APIGroups: []string{
"apps",
},
Resources: []string{
"controllerrevisions",
},
Verbs: []string{
"watch",
"list",
"create",
"delete",
"get",
"update",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"persistentvolumeclaims",
},
Verbs: []string{
"get", "list", "watch", "create", "update", "delete", "patch",
},
},
{
APIGroups: []string{
"snapshot.kubevirt.io",
},
Resources: []string{
"virtualmachinesnapshots",
"virtualmachinesnapshots/status",
"virtualmachinesnapshots/finalizers",
"virtualmachinesnapshotcontents",
"virtualmachinesnapshotcontents/status",
"virtualmachinesnapshotcontents/finalizers",
"virtualmachinerestores",
"virtualmachinerestores/status",
},
Verbs: []string{
"get", "list", "watch", "create", "update", "delete", "patch",
},
},
{
APIGroups: []string{
"export.kubevirt.io",
},
Resources: []string{
"virtualmachineexports",
"virtualmachineexports/status",
"virtualmachineexports/finalizers",
},
Verbs: []string{
"get", "list", "watch", "create", "update", "delete", "patch",
},
},
{
APIGroups: []string{
"backup.kubevirt.io",
},
Resources: []string{
"virtualmachinebackups",
"virtualmachinebackups/status",
"virtualmachinebackups/finalizers",
},
Verbs: []string{
"get", "list", "watch", "create", "update", "delete", "patch",
},
},
{
APIGroups: []string{
"backup.kubevirt.io",
},
Resources: []string{
"virtualmachinebackuptrackers",
"virtualmachinebackuptrackers/status",
},
Verbs: []string{
"get", "list", "watch", "create", "update", "delete", "patch",
},
},
{
APIGroups: []string{
"pool.kubevirt.io",
},
Resources: []string{
"virtualmachinepools",
"virtualmachinepools/finalizers",
"virtualmachinepools/status",
"virtualmachinepools/scale",
},
Verbs: []string{
"watch",
"list",
"create",
"delete",
"update",
"patch",
"get",
},
},
{
APIGroups: []string{
"kubevirt.io",
},
Resources: []string{
"*",
},
Verbs: []string{
"*",
},
},
// Implied by the asterisk above, but kept explicit since it is crucial for OwnerReferencesPermissionEnforcement
{
APIGroups: []string{
"kubevirt.io",
},
Resources: []string{
"virtualmachines/finalizers",
"virtualmachineinstances/finalizers",
},
Verbs: []string{
"update",
},
},
{
APIGroups: []string{
"subresources.kubevirt.io",
},
Resources: []string{
"virtualmachines/stop",
"virtualmachineinstances/addvolume",
"virtualmachineinstances/removevolume",
"virtualmachineinstances/backup",
"virtualmachineinstances/freeze",
"virtualmachineinstances/unfreeze",
"virtualmachineinstances/reset",
"virtualmachineinstances/softreboot",
"virtualmachineinstances/sev/setupsession",
"virtualmachineinstances/sev/injectlaunchsecret",
},
Verbs: []string{
"update",
},
},
{
APIGroups: []string{
"cdi.kubevirt.io",
},
Resources: []string{
"*",
},
Verbs: []string{
"*",
},
},
{
APIGroups: []string{
"k8s.cni.cncf.io",
},
Resources: []string{
"network-attachment-definitions",
},
Verbs: []string{"get"},
},
{
APIGroups: []string{
"apiextensions.k8s.io",
},
Resources: []string{
"customresourcedefinitions",
},
Verbs: []string{
"get",
"list",
"watch",
},
},
{
APIGroups: []string{
"authorization.k8s.io",
},
Resources: []string{
"subjectaccessreviews",
},
Verbs: []string{
"create",
},
},
{
APIGroups: []string{
"snapshot.storage.k8s.io",
},
Resources: []string{
"volumesnapshotclasses",
},
Verbs: []string{
"get",
"list",
"watch",
},
},
{
APIGroups: []string{
"snapshot.storage.k8s.io",
},
Resources: []string{
"volumesnapshots",
},
Verbs: []string{
"get",
"list",
"watch",
"create",
"update",
"delete",
},
},
{
APIGroups: []string{
"storage.k8s.io",
},
Resources: []string{
"storageclasses",
},
Verbs: []string{
"get",
"list",
"watch",
},
},
{
APIGroups: []string{
"instancetype.kubevirt.io",
},
Resources: []string{
instancetype.PluralResourceName,
instancetype.ClusterPluralResourceName,
instancetype.PluralPreferenceResourceName,
instancetype.ClusterPluralPreferenceResourceName,
},
Verbs: []string{
"get", "list", "watch",
},
},
{
APIGroups: []string{
migrations.GroupName,
},
Resources: []string{
migrations.ResourceMigrationPolicies,
},
Verbs: []string{
"get", "list", "watch",
},
},
{
APIGroups: []string{
clone.GroupName,
},
Resources: []string{
clone.ResourceVMClonePlural,
clone.ResourceVMClonePlural + "/status",
clone.ResourceVMClonePlural + "/finalizers",
},
Verbs: []string{
"get", "list", "watch", "update", "patch", "delete",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"namespaces",
},
Verbs: []string{
"get",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"resourcequotas",
},
Verbs: []string{
"list",
"watch",
},
},
{
APIGroups: []string{
"batch",
},
Resources: []string{
"jobs",
},
Verbs: []string{
"create",
"get",
"delete",
},
},
{
APIGroups: []string{
"resource.k8s.io",
},
Resources: []string{
"resourceslices",
"resourceclaims",
},
Verbs: []string{
"list",
"watch",
"get",
},
},
},
}
}
func newControllerClusterRoleBinding(namespace string) *rbacv1.ClusterRoleBinding {
return &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: components.ControllerServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: components.ControllerServiceAccountName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: namespace,
Name: components.ControllerServiceAccountName,
},
},
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package rbac
import (
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
virtv1 "kubevirt.io/api/core/v1"
)
const ExportProxyServiceAccountName = "kubevirt-exportproxy"
func GetAllExportProxy(namespace string) []runtime.Object {
return []runtime.Object{
newExportProxyServiceAccount(namespace),
newExportProxyClusterRole(),
newExportProxyClusterRoleBinding(namespace),
newExportProxyRole(namespace),
newExportProxyRoleBinding(namespace),
}
}
func newExportProxyServiceAccount(namespace string) *corev1.ServiceAccount {
return &corev1.ServiceAccount{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: ExportProxyServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
}
}
func newExportProxyClusterRole() *rbacv1.ClusterRole {
return &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: metav1.ObjectMeta{
Name: ExportProxyServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{
"export.kubevirt.io",
},
Resources: []string{
"virtualmachineexports",
},
Verbs: []string{
"get", "list", "watch",
},
},
{
APIGroups: []string{
"kubevirt.io",
},
Resources: []string{
"kubevirts",
},
Verbs: []string{
"list",
"watch",
},
},
},
}
}
func newExportProxyClusterRoleBinding(namespace string) *rbacv1.ClusterRoleBinding {
return &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: ExportProxyServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: ExportProxyServiceAccountName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: namespace,
Name: ExportProxyServiceAccountName,
},
},
}
}
func newExportProxyRole(namespace string) *rbacv1.Role {
return &rbacv1.Role{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "Role",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: ExportProxyServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{
"",
},
Resources: []string{
"configmaps",
},
Verbs: []string{
"get", "list", "watch",
},
ResourceNames: []string{
"kubevirt-export-ca",
},
},
},
}
}
func newExportProxyRoleBinding(namespace string) *rbacv1.RoleBinding {
return &rbacv1.RoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "RoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: ExportProxyServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: ExportProxyServiceAccountName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: namespace,
Name: ExportProxyServiceAccountName,
},
},
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package rbac
import (
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/api/migrations"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/components"
)
func GetAllHandler(namespace string) []runtime.Object {
return []runtime.Object{
newHandlerServiceAccount(namespace),
newHandlerClusterRole(),
newHandlerClusterRoleBinding(namespace),
newHandlerRole(namespace),
newHandlerRoleBinding(namespace),
}
}
func newHandlerServiceAccount(namespace string) *corev1.ServiceAccount {
return &corev1.ServiceAccount{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: components.HandlerServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
}
}
func newHandlerClusterRole() *rbacv1.ClusterRole {
return &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "ClusterRole",
},
ObjectMeta: metav1.ObjectMeta{
Name: components.HandlerServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{
"kubevirt.io",
},
Resources: []string{
"virtualmachineinstances",
},
Verbs: []string{
"update", "list", "watch",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"nodes",
},
Verbs: []string{
"patch",
"list",
"watch",
"get",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"configmaps",
},
Verbs: []string{
"get",
"list",
"watch",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"events",
},
Verbs: []string{
"create", "patch",
},
},
{
APIGroups: []string{
"apiextensions.k8s.io",
},
Resources: []string{
"customresourcedefinitions",
},
Verbs: []string{
"get",
"list",
"watch",
},
},
{
APIGroups: []string{
"kubevirt.io",
},
Resources: []string{
"kubevirts",
},
Verbs: []string{
"get",
"list",
"watch",
},
},
{
APIGroups: []string{
migrations.GroupName,
},
Resources: []string{
migrations.ResourceMigrationPolicies,
},
Verbs: []string{
"get", "list", "watch",
},
},
},
}
}
func newHandlerClusterRoleBinding(namespace string) *rbacv1.ClusterRoleBinding {
return &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: components.HandlerServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: components.HandlerServiceAccountName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: namespace,
Name: components.HandlerServiceAccountName,
},
},
}
}
func newHandlerRole(namespace string) *rbacv1.Role {
return &rbacv1.Role{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "Role",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: components.HandlerServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{
"",
},
Resources: []string{
"configmaps",
},
Verbs: []string{
"get", "list", "watch",
},
},
},
}
}
func newHandlerRoleBinding(namespace string) *rbacv1.RoleBinding {
return &rbacv1.RoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "RoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: components.HandlerServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: components.HandlerServiceAccountName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: namespace,
Name: components.HandlerServiceAccountName,
},
},
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package rbac
import (
"fmt"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/virt-operator/resource/generate/components"
)
const (
GroupNameSecurity = "security.openshift.io"
GroupNameRoute = "route.openshift.io"
serviceAccountFmt = "%s:%s:%s"
)
// Used for manifest generation only, not by the operator itself
func GetAllOperator(namespace string) []interface{} {
return []interface{}{
newOperatorServiceAccount(namespace),
NewOperatorRole(namespace),
newOperatorRoleBinding(namespace),
NewOperatorClusterRole(),
newOperatorClusterRoleBinding(namespace),
}
}
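// Illustrative sketch (not part of this package): since GetAllOperator is
// used for manifest generation only, a generator can serialize the returned
// objects into a multi-document manifest. The yaml import is an assumed
// dependency for this sketch; the real templator may serialize differently.
//
//	import "sigs.k8s.io/yaml" // assumption for the sketch
//
//	func renderOperatorManifests(namespace string) ([]byte, error) {
//		var out []byte
//		for _, obj := range GetAllOperator(namespace) {
//			doc, err := yaml.Marshal(obj)
//			if err != nil {
//				return nil, err
//			}
//			out = append(out, []byte("---\n")...)
//			out = append(out, doc...)
//		}
//		return out, nil
//	}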
func newOperatorServiceAccount(namespace string) *corev1.ServiceAccount {
return &corev1.ServiceAccount{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: components.OperatorServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
}
}
// Public because it is used in the manifest-templator.
func NewOperatorClusterRole() *rbacv1.ClusterRole {
// These are the permissions needed by the operator itself.
// To deploy KubeVirt successfully, the operator also needs every permission
// that the KubeVirt components' rules use (you cannot create rules granting
// permissions you do not hold yourself); those component rules are appended below.
operatorRole := &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "ClusterRole",
},
ObjectMeta: metav1.ObjectMeta{
Name: components.OperatorServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{
"kubevirt.io",
},
Resources: []string{
"kubevirts",
},
Verbs: []string{
"get",
"list",
"watch",
"patch",
"update",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"serviceaccounts",
"services",
"endpoints",
// pods/exec is required for testing upgrades - it can be removed when we stop
// supporting upgrades from versions in which virt-api required pods/exec privileges
"pods/exec",
},
Verbs: []string{
"get",
"list",
"watch",
"create",
"update",
"delete",
"patch",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"configmaps",
},
Verbs: []string{
"patch",
"delete",
},
},
{
APIGroups: []string{
"batch",
},
Resources: []string{
"jobs",
},
Verbs: []string{
"get",
"list",
"watch",
"create",
"delete",
"patch",
},
},
{
APIGroups: []string{
"apps",
},
Resources: []string{
"controllerrevisions",
},
Verbs: []string{
"watch",
"list",
"create",
"delete",
"patch",
},
},
{
APIGroups: []string{
"apps",
},
Resources: []string{
"deployments",
"daemonsets",
},
Verbs: []string{
"get",
"list",
"watch",
"create",
"delete",
"patch",
},
},
{
APIGroups: []string{
VersionName,
},
Resources: []string{
"clusterroles",
"clusterrolebindings",
"roles",
"rolebindings",
},
Verbs: []string{
"get",
"list",
"watch",
"create",
"delete",
"patch",
"update",
},
},
{
APIGroups: []string{
"apiextensions.k8s.io",
},
Resources: []string{
"customresourcedefinitions",
},
Verbs: []string{
"get",
"list",
"watch",
"create",
"delete",
"patch",
},
},
{
APIGroups: []string{
GroupNameSecurity,
},
Resources: []string{
"securitycontextconstraints",
},
Verbs: []string{
"create",
"get",
"list",
"watch",
},
},
{
APIGroups: []string{
GroupNameSecurity,
},
Resources: []string{
"securitycontextconstraints",
},
ResourceNames: []string{
"privileged",
},
Verbs: []string{
"get",
"patch",
"update",
},
},
{
APIGroups: []string{
GroupNameSecurity,
},
Resources: []string{
"securitycontextconstraints",
},
ResourceNames: []string{
"kubevirt-handler",
"kubevirt-controller",
},
Verbs: []string{
"get",
"list",
"watch",
"update",
"delete",
},
},
{
APIGroups: []string{
"admissionregistration.k8s.io",
},
Resources: []string{
"validatingwebhookconfigurations",
"mutatingwebhookconfigurations",
"validatingadmissionpolicybindings",
"validatingadmissionpolicies",
},
Verbs: []string{
"get", "list", "watch", "create", "delete", "update", "patch",
},
},
{
APIGroups: []string{
"apiregistration.k8s.io",
},
Resources: []string{
"apiservices",
},
Verbs: []string{
"get", "list", "watch", "create", "delete", "update", "patch",
},
},
{
APIGroups: []string{
"monitoring.coreos.com",
},
Resources: []string{
"servicemonitors",
"prometheusrules",
},
Verbs: []string{
"get", "list", "watch", "create", "delete", "update", "patch",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"namespaces",
},
Verbs: []string{
"get",
"list",
"watch",
"patch",
},
},
},
}
// now append all rules needed by KubeVirt's components
operatorRole.Rules = append(operatorRole.Rules, getKubeVirtComponentsClusterRules()...)
return operatorRole
}
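// For illustration, a sketch of the escalation rule referenced above (not
// code from this package): if a component ClusterRole contains
//
//	{APIGroups: []string{""}, Resources: []string{"pods"}, Verbs: []string{"get"}}
//
// then the operator's own ClusterRole must already grant that permission,
// otherwise the API server's RBAC escalation check rejects creating or
// updating the component role (unless the operator holds the "escalate"
// verb on roles). Appending the component rules above satisfies this.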
func getKubeVirtComponentsClusterRules() []rbacv1.PolicyRule {
var rules []rbacv1.PolicyRule
// namespace doesn't matter, we are only interested in the rules of ClusterRoles
all := GetAllApiServer("")
all = append(all, GetAllController("")...)
all = append(all, GetAllHandler("")...)
all = append(all, GetAllExportProxy("")...)
all = append(all, GetAllSynchronizationController("")...)
all = append(all, GetAllCluster()...)
for _, resource := range all {
if role, ok := resource.(*rbacv1.ClusterRole); ok {
rules = append(rules, role.Rules...)
}
}
// OLM doesn't support role refs, so we need special handling for auth
// delegation for the apiserver: the rules of the system:auth-delegator
// ClusterRole are added manually.
authDelegationRules := []rbacv1.PolicyRule{
{
APIGroups: []string{
"authentication.k8s.io",
},
Resources: []string{
"tokenreviews",
},
Verbs: []string{
"create",
},
},
{
APIGroups: []string{
"authorization.k8s.io",
},
Resources: []string{
"subjectaccessreviews",
},
Verbs: []string{
"create",
},
},
}
rules = append(rules, authDelegationRules...)
return rules
}
func getKubeVirtComponentsRules() []rbacv1.PolicyRule {
var rules []rbacv1.PolicyRule
// namespace doesn't matter, we are only interested in the rules
all := GetAllApiServer("")
all = append(all, GetAllController("")...)
all = append(all, GetAllHandler("")...)
all = append(all, GetAllExportProxy("")...)
all = append(all, GetAllSynchronizationController("")...)
all = append(all, GetAllCluster()...)
for _, resource := range all {
if role, ok := resource.(*rbacv1.Role); ok {
rules = append(rules, role.Rules...)
}
}
return rules
}
func newOperatorClusterRoleBinding(namespace string) *rbacv1.ClusterRoleBinding {
return &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: components.OperatorServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: VersionName,
Kind: "ClusterRole",
Name: components.OperatorServiceAccountName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: namespace,
Name: components.OperatorServiceAccountName,
},
},
}
}
func newOperatorRoleBinding(namespace string) *rbacv1.RoleBinding {
return &rbacv1.RoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "RoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: "kubevirt-operator-rolebinding",
Namespace: namespace,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: VersionName,
Kind: "Role",
Name: components.OperatorServiceAccountName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: namespace,
Name: components.OperatorServiceAccountName,
},
},
}
}
// NewOperatorRole creates a Role object for kubevirt-operator.
func NewOperatorRole(namespace string) *rbacv1.Role {
operatorRole := &rbacv1.Role{
TypeMeta: metav1.TypeMeta{
APIVersion: VersionNamev1,
Kind: "Role",
},
ObjectMeta: metav1.ObjectMeta{
Name: components.OperatorServiceAccountName,
Namespace: namespace,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{
"",
},
Resources: []string{
"secrets",
},
ResourceNames: []string{
components.KubeVirtCASecretName,
components.KubeVirtExportCASecretName,
components.VirtHandlerCertSecretName,
components.VirtHandlerServerCertSecretName,
components.VirtHandlerMigrationClientCertSecretName,
components.VirtHandlerVsockClientCertSecretName,
components.VirtOperatorCertSecretName,
components.VirtApiCertSecretName,
components.VirtControllerCertSecretName,
components.VirtExportProxyCertSecretName,
components.VirtSynchronizationControllerCertSecretName,
components.VirtSynchronizationControllerServerCertSecretName,
},
Verbs: []string{
"create",
"get",
"list",
"watch",
"patch",
"delete",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"configmaps",
},
Verbs: []string{
"create",
"get",
"list",
"watch",
"patch",
"delete",
},
},
{
APIGroups: []string{
GroupNameRoute,
},
Resources: []string{
"routes",
},
Verbs: []string{
"create",
"get",
"list",
"watch",
"patch",
"delete",
},
},
{
APIGroups: []string{
GroupNameRoute,
},
Resources: []string{
"routes/custom-host",
},
Verbs: []string{
"create",
},
},
{
APIGroups: []string{
"coordination.k8s.io",
},
Resources: []string{
"leases",
},
Verbs: []string{
"get", "list", "watch", "delete", "update", "create", "patch",
},
},
},
}
operatorRole.Rules = append(operatorRole.Rules, getKubeVirtComponentsRules()...)
return operatorRole
}
func GetKubevirtComponentsServiceAccounts(namespace string) map[string]bool {
usermap := make(map[string]bool)
prefix := "system:serviceaccount"
usermap[fmt.Sprintf(serviceAccountFmt, prefix, namespace, components.HandlerServiceAccountName)] = true
usermap[fmt.Sprintf(serviceAccountFmt, prefix, namespace, components.ApiServiceAccountName)] = true
usermap[fmt.Sprintf(serviceAccountFmt, prefix, namespace, components.ControllerServiceAccountName)] = true
usermap[fmt.Sprintf(serviceAccountFmt, prefix, namespace, components.OperatorServiceAccountName)] = true
return usermap
}
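// Illustrative sketch only: the map keys follow the
// "system:serviceaccount:<namespace>:<name>" convention used by the API
// server for service account users, so a webhook can test a request's
// username directly. The helper below is hypothetical, not part of this
// package.
//
//	func isKubeVirtServiceAccount(username, installNamespace string) bool {
//		return GetKubevirtComponentsServiceAccounts(installNamespace)[username]
//	}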
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package rbac
import (
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
virtv1 "kubevirt.io/api/core/v1"
)
const MONITOR_SERVICEACCOUNT_NAME = "kubevirt-monitoring"
func GetAllServiceMonitor(namespace string, monitorNamespace string, monitorServiceAccount string) []runtime.Object {
return []runtime.Object{
newServiceMonitorRole(namespace),
newServiceMonitorRoleBinding(namespace, monitorNamespace, monitorServiceAccount),
}
}
func newServiceMonitorRole(namespace string) *rbacv1.Role {
return &rbacv1.Role{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "Role",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: MONITOR_SERVICEACCOUNT_NAME,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{
"",
},
Resources: []string{
"services",
"endpoints",
"pods",
},
Verbs: []string{
"get", "list", "watch",
},
},
},
}
}
func newServiceMonitorRoleBinding(namespace string, monitorNamespace string, monitorServiceAccount string) *rbacv1.RoleBinding {
return &rbacv1.RoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "RoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: MONITOR_SERVICEACCOUNT_NAME,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: MONITOR_SERVICEACCOUNT_NAME,
},
Subjects: []rbacv1.Subject{
{
// The subject is the monitoring stack's service account in its own
// namespace, while the Role lives in the KubeVirt install namespace,
// granting Prometheus read access to KubeVirt's services and endpoints.
Kind: "ServiceAccount",
Namespace: monitorNamespace,
Name: monitorServiceAccount,
},
},
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package rbac
import (
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
virtv1 "kubevirt.io/api/core/v1"
)
const SynchronizationControllerServiceAccountName = "kubevirt-synchronization-controller"
func GetAllSynchronizationController(namespace string) []runtime.Object {
return []runtime.Object{
newSynchronizationControllerServiceAccount(namespace),
newSynchronizationControllerClusterRole(),
newSynchronizationControllerClusterRoleBinding(namespace),
newSynchronizationControllerRole(namespace),
newSynchronizationControllerRoleBinding(namespace),
}
}
func newSynchronizationControllerServiceAccount(namespace string) *corev1.ServiceAccount {
return &corev1.ServiceAccount{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: SynchronizationControllerServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
}
}
func newSynchronizationControllerClusterRole() *rbacv1.ClusterRole {
return &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: metav1.ObjectMeta{
Name: SynchronizationControllerServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{
"kubevirt.io",
},
Resources: []string{
"virtualmachineinstances",
},
Verbs: []string{
"get", "list", "watch", "update", "patch",
},
},
{
APIGroups: []string{
"kubevirt.io",
},
Resources: []string{
"virtualmachineinstancemigrations",
},
Verbs: []string{
"get", "list", "watch", "patch", "delete",
},
},
{
APIGroups: []string{
"kubevirt.io",
},
Resources: []string{
"kubevirts",
},
Verbs: []string{
"get", "list", "watch",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"events",
},
Verbs: []string{
"update", "create", "patch",
},
},
{
APIGroups: []string{
"apiextensions.k8s.io",
},
Resources: []string{
"customresourcedefinitions",
},
Verbs: []string{
"get", "list", "watch",
},
},
},
}
}
func newSynchronizationControllerClusterRoleBinding(namespace string) *rbacv1.ClusterRoleBinding {
return &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: SynchronizationControllerServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: SynchronizationControllerServiceAccountName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: namespace,
Name: SynchronizationControllerServiceAccountName,
},
},
}
}
func newSynchronizationControllerRole(namespace string) *rbacv1.Role {
return &rbacv1.Role{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "Role",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: SynchronizationControllerServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{
"",
},
Resources: []string{
"configmaps",
},
Verbs: []string{
"get", "list", "watch",
},
ResourceNames: []string{
"kubevirt-ca",
},
},
{
APIGroups: []string{
"coordination.k8s.io",
},
Resources: []string{
"leases",
},
Verbs: []string{
"get", "list", "watch", "delete", "update", "create", "patch",
},
},
},
}
}
func newSynchronizationControllerRoleBinding(namespace string) *rbacv1.RoleBinding {
return &rbacv1.RoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "RoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: SynchronizationControllerServiceAccountName,
Labels: map[string]string{
virtv1.AppLabel: "",
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: SynchronizationControllerServiceAccountName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Namespace: namespace,
Name: SynchronizationControllerServiceAccountName,
},
},
}
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package placement
import (
corev1 "k8s.io/api/core/v1"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
)
// DefaultInfraComponentsNodePlacement selects the default scheduling policy
// applied to infra components when no explicit NodePlacement is configured.
type DefaultInfraComponentsNodePlacement int
const (
AnyNode DefaultInfraComponentsNodePlacement = iota
RequireControlPlanePreferNonWorker
)
const (
KubernetesOSLabel = corev1.LabelOSStable
KubernetesOSLinux = "linux"
)
// InjectPlacementMetadata merges all Tolerations, Affinity and NodeSelectors from NodePlacement into pod spec
func InjectPlacementMetadata(componentConfig *v1.ComponentConfig, podSpec *corev1.PodSpec, nodePlacementOption DefaultInfraComponentsNodePlacement) {
if podSpec == nil {
// Guard against nil dereferences below. Note that this only rebinds the
// local variable; a caller passing nil will not observe any mutations.
podSpec = &corev1.PodSpec{}
}
if componentConfig == nil || componentConfig.NodePlacement == nil {
switch nodePlacementOption {
case AnyNode:
componentConfig = &v1.ComponentConfig{NodePlacement: &v1.NodePlacement{}}
case RequireControlPlanePreferNonWorker:
componentConfig = &v1.ComponentConfig{
NodePlacement: &v1.NodePlacement{
Affinity: &corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
NodeSelectorTerms: []corev1.NodeSelectorTerm{
{
MatchExpressions: []corev1.NodeSelectorRequirement{
{
Key: "node-role.kubernetes.io/control-plane",
Operator: corev1.NodeSelectorOpExists,
},
},
},
{
MatchExpressions: []corev1.NodeSelectorRequirement{
{
Key: "node-role.kubernetes.io/master",
Operator: corev1.NodeSelectorOpExists,
},
},
},
},
},
PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{
{
Weight: 100,
Preference: corev1.NodeSelectorTerm{
MatchExpressions: []corev1.NodeSelectorRequirement{
{
Key: "node-role.kubernetes.io/worker",
Operator: corev1.NodeSelectorOpDoesNotExist,
},
},
},
},
},
},
},
Tolerations: []corev1.Toleration{
{
Key: "node-role.kubernetes.io/control-plane",
Operator: corev1.TolerationOpExists,
Effect: corev1.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/master",
Operator: corev1.TolerationOpExists,
Effect: corev1.TaintEffectNoSchedule,
},
},
},
}
default:
log.Log.Errorf("Unknown nodePlacementOption %d provided to InjectPlacementMetadata. Falling back to the AnyNode option", nodePlacementOption)
componentConfig = &v1.ComponentConfig{NodePlacement: &v1.NodePlacement{}}
}
}
nodePlacement := componentConfig.NodePlacement
if len(nodePlacement.NodeSelector) == 0 {
nodePlacement.NodeSelector = make(map[string]string)
}
if _, ok := nodePlacement.NodeSelector[KubernetesOSLabel]; !ok {
nodePlacement.NodeSelector[KubernetesOSLabel] = KubernetesOSLinux
}
if len(podSpec.NodeSelector) == 0 {
podSpec.NodeSelector = make(map[string]string, len(nodePlacement.NodeSelector))
}
// podSpec.NodeSelector
for nsKey, nsVal := range nodePlacement.NodeSelector {
// Favor podSpec over NodePlacement. This prevents the cluster admin from
// clobbering node selectors that KubeVirt intentionally set.
if _, ok := podSpec.NodeSelector[nsKey]; !ok {
podSpec.NodeSelector[nsKey] = nsVal
}
}
// podSpec.Affinity
if nodePlacement.Affinity != nil {
if podSpec.Affinity == nil {
podSpec.Affinity = nodePlacement.Affinity.DeepCopy()
} else {
// podSpec.Affinity.NodeAffinity
if nodePlacement.Affinity.NodeAffinity != nil {
if podSpec.Affinity.NodeAffinity == nil {
podSpec.Affinity.NodeAffinity = nodePlacement.Affinity.NodeAffinity.DeepCopy()
} else {
// need to copy all affinity terms one by one
if nodePlacement.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
if podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = nodePlacement.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.DeepCopy()
} else {
// merge the list of terms from NodePlacement into podSpec
podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, nodePlacement.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms...)
}
}
// PreferredDuringSchedulingIgnoredDuringExecution
podSpec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(podSpec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution, nodePlacement.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution...)
}
}
// podSpec.Affinity.PodAffinity
if nodePlacement.Affinity.PodAffinity != nil {
if podSpec.Affinity.PodAffinity == nil {
podSpec.Affinity.PodAffinity = nodePlacement.Affinity.PodAffinity.DeepCopy()
} else {
// RequiredDuringSchedulingIgnoredDuringExecution
podSpec.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(podSpec.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution, nodePlacement.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution...)
// PreferredDuringSchedulingIgnoredDuringExecution
podSpec.Affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(podSpec.Affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution, nodePlacement.Affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution...)
}
}
// podSpec.Affinity.PodAntiAffinity
if nodePlacement.Affinity.PodAntiAffinity != nil {
if podSpec.Affinity.PodAntiAffinity == nil {
podSpec.Affinity.PodAntiAffinity = nodePlacement.Affinity.PodAntiAffinity.DeepCopy()
} else {
// RequiredDuringSchedulingIgnoredDuringExecution
podSpec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(podSpec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, nodePlacement.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution...)
// PreferredDuringSchedulingIgnoredDuringExecution
podSpec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(podSpec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, nodePlacement.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution...)
}
}
}
}
// podSpec.Tolerations
podSpec.Tolerations = append(podSpec.Tolerations, nodePlacement.Tolerations...)
}
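// Illustrative sketch only (values are assumptions for the example): merging
// a NodePlacement into a pod spec that already sets a conflicting selector.
// Note that nodePlacementOption only takes effect when no NodePlacement is
// configured on the component.
//
//	podSpec := &corev1.PodSpec{
//		NodeSelector: map[string]string{"kubernetes.io/arch": "amd64"},
//	}
//	cfg := &v1.ComponentConfig{
//		NodePlacement: &v1.NodePlacement{
//			NodeSelector: map[string]string{"kubernetes.io/arch": "arm64"},
//		},
//	}
//	InjectPlacementMetadata(cfg, podSpec, AnyNode)
//	// podSpec wins on conflicts: "kubernetes.io/arch" stays "amd64", and the
//	// kubernetes.io/os=linux selector is added because podSpec lacked it.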
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package util
import (
"fmt"
"time"
promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
"k8s.io/client-go/discovery"
k8sv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
virtv1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/version"
)
const (
KubeVirtFinalizer string = "foregroundDeleteKubeVirt"
ConditionReasonDeploymentFailedExisting = "ExistingDeployment"
ConditionReasonDeploymentFailedError = "DeploymentFailed"
ConditionReasonDeletionFailedError = "DeletionFailed"
ConditionReasonDeploymentCreated = "AllResourcesCreated"
ConditionReasonDeploymentReady = "AllComponentsReady"
ConditionReasonDeploying = "DeploymentInProgress"
ConditionReasonUpdating = "UpdateInProgress"
ConditionReasonDeleting = "DeletionInProgress"
)
func UpdateConditionsDeploying(kv *virtv1.KubeVirt) {
removeCondition(kv, virtv1.KubeVirtConditionSynchronized)
msg := fmt.Sprintf("Deploying version %s with registry %s",
kv.Status.TargetKubeVirtVersion,
kv.Status.TargetKubeVirtRegistry)
updateCondition(kv, virtv1.KubeVirtConditionAvailable, k8sv1.ConditionFalse, ConditionReasonDeploying, msg)
updateCondition(kv, virtv1.KubeVirtConditionProgressing, k8sv1.ConditionTrue, ConditionReasonDeploying, msg)
updateCondition(kv, virtv1.KubeVirtConditionDegraded, k8sv1.ConditionFalse, ConditionReasonDeploying, msg)
}
func UpdateConditionsUpdating(kv *virtv1.KubeVirt) {
removeCondition(kv, virtv1.KubeVirtConditionCreated)
removeCondition(kv, virtv1.KubeVirtConditionSynchronized)
msg := fmt.Sprintf("Transitioning from previous version %s with registry %s to target version %s using registry %s",
kv.Status.ObservedKubeVirtVersion,
kv.Status.ObservedKubeVirtRegistry,
kv.Status.TargetKubeVirtVersion,
kv.Status.TargetKubeVirtRegistry)
updateCondition(kv, virtv1.KubeVirtConditionAvailable, k8sv1.ConditionTrue, ConditionReasonUpdating, msg)
updateCondition(kv, virtv1.KubeVirtConditionProgressing, k8sv1.ConditionTrue, ConditionReasonUpdating, msg)
updateCondition(kv, virtv1.KubeVirtConditionDegraded, k8sv1.ConditionTrue, ConditionReasonUpdating, msg)
}
func UpdateConditionsCreated(kv *virtv1.KubeVirt) {
updateCondition(kv, virtv1.KubeVirtConditionCreated, k8sv1.ConditionTrue, ConditionReasonDeploymentCreated, "All resources were created.")
}
func UpdateConditionsAvailable(kv *virtv1.KubeVirt) {
msg := "All components are ready."
updateCondition(kv, virtv1.KubeVirtConditionAvailable, k8sv1.ConditionTrue, ConditionReasonDeploymentReady, msg)
updateCondition(kv, virtv1.KubeVirtConditionProgressing, k8sv1.ConditionFalse, ConditionReasonDeploymentReady, msg)
updateCondition(kv, virtv1.KubeVirtConditionDegraded, k8sv1.ConditionFalse, ConditionReasonDeploymentReady, msg)
}
func UpdateConditionsFailedExists(kv *virtv1.KubeVirt) {
updateCondition(kv, virtv1.KubeVirtConditionSynchronized, k8sv1.ConditionFalse, ConditionReasonDeploymentFailedExisting, "There is an active KubeVirt deployment")
// don't set any other conditions here, so that HCO just ignores this KubeVirt CR
}
func UpdateConditionsFailedError(kv *virtv1.KubeVirt, err error) {
msg := fmt.Sprintf("An error occurred during deployment: %v", err)
updateCondition(kv, virtv1.KubeVirtConditionSynchronized, k8sv1.ConditionFalse, ConditionReasonDeploymentFailedError, msg)
updateCondition(kv, virtv1.KubeVirtConditionAvailable, k8sv1.ConditionFalse, ConditionReasonDeploymentFailedError, msg)
updateCondition(kv, virtv1.KubeVirtConditionProgressing, k8sv1.ConditionFalse, ConditionReasonDeploymentFailedError, msg)
updateCondition(kv, virtv1.KubeVirtConditionDegraded, k8sv1.ConditionTrue, ConditionReasonDeploymentFailedError, msg)
}
func UpdateConditionsDeleting(kv *virtv1.KubeVirt) {
removeCondition(kv, virtv1.KubeVirtConditionCreated)
removeCondition(kv, virtv1.KubeVirtConditionSynchronized)
msg := "Deletion was triggered"
updateCondition(kv, virtv1.KubeVirtConditionAvailable, k8sv1.ConditionFalse, ConditionReasonDeleting, msg)
updateCondition(kv, virtv1.KubeVirtConditionProgressing, k8sv1.ConditionFalse, ConditionReasonDeleting, msg)
updateCondition(kv, virtv1.KubeVirtConditionDegraded, k8sv1.ConditionTrue, ConditionReasonDeleting, msg)
}
func UpdateConditionsDeletionFailed(kv *virtv1.KubeVirt, err error) {
updateCondition(kv, virtv1.KubeVirtConditionSynchronized, k8sv1.ConditionFalse, ConditionReasonDeletionFailedError, fmt.Sprintf("An error occurred during deletion: %v", err))
}
func updateCondition(kv *virtv1.KubeVirt, conditionType virtv1.KubeVirtConditionType, status k8sv1.ConditionStatus, reason string, message string) {
condition, isNew := getCondition(kv, conditionType)
condition.Status = status
condition.Reason = reason
condition.Message = message
conditions := kv.Status.Conditions
if isNew {
conditions = append(conditions, *condition)
} else {
for i := range conditions {
if conditions[i].Type == conditionType {
conditions[i] = *condition
break
}
}
}
kv.Status.Conditions = conditions
}
func getCondition(kv *virtv1.KubeVirt, conditionType virtv1.KubeVirtConditionType) (*virtv1.KubeVirtCondition, bool) {
for _, condition := range kv.Status.Conditions {
if condition.Type == conditionType {
return &condition, false
}
}
condition := &virtv1.KubeVirtCondition{
Type: conditionType,
}
return condition, true
}
func removeCondition(kv *virtv1.KubeVirt, conditionType virtv1.KubeVirtConditionType) {
conditions := kv.Status.Conditions
for i, condition := range conditions {
if condition.Type == conditionType {
conditions = append(conditions[:i], conditions[i+1:]...)
kv.Status.Conditions = conditions
return
}
}
}
func SetConditionTimestamps(kvOrig *virtv1.KubeVirt, kvUpdated *virtv1.KubeVirt) {
now := metav1.Time{
Time: time.Now(),
}
for i, c := range kvUpdated.Status.Conditions {
if cOrig, created := getCondition(kvOrig, c.Type); !created {
// check if condition was updated
if cOrig.Status != c.Status ||
cOrig.Reason != c.Reason ||
cOrig.Message != c.Message {
kvUpdated.Status.Conditions[i].LastProbeTime = now
kvUpdated.Status.Conditions[i].LastTransitionTime = now
}
// deliberately do not bump LastProbeTime alone; doing so on every probe would result in too many updates
} else {
// condition is new
kvUpdated.Status.Conditions[i].LastProbeTime = now
}
}
}
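// Illustrative sketch only (the real reconcile flow lives elsewhere): a
// typical caller mutates a copy of the CR and then stamps only the
// conditions that actually changed relative to the original:
//
//	kvCopy := kv.DeepCopy()
//	UpdateConditionsDeploying(kvCopy)
//	SetConditionTimestamps(kv, kvCopy)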
func AddFinalizer(kv *virtv1.KubeVirt) {
if !hasFinalizer(kv) {
kv.Finalizers = append(kv.Finalizers, KubeVirtFinalizer)
}
}
func hasFinalizer(kv *virtv1.KubeVirt) bool {
for _, f := range kv.GetFinalizers() {
if f == KubeVirtFinalizer {
return true
}
}
return false
}
func SetOperatorVersion(kv *virtv1.KubeVirt) {
kv.Status.OperatorVersion = version.Get().String()
}
func IsServiceMonitorEnabled(clientset kubecli.KubevirtClient) (bool, error) {
_, apis, err := clientset.DiscoveryClient().ServerGroupsAndResources()
if err != nil && !discovery.IsGroupDiscoveryFailedError(err) {
return false, err
}
for _, api := range apis {
if api.GroupVersion == promv1.SchemeGroupVersion.String() {
for _, resource := range api.APIResources {
if resource.Name == "servicemonitors" {
return true, nil
}
}
}
}
return false, nil
}
// IsPrometheusRuleEnabled returns true if prometheusrules cr is defined
// and false otherwise.
func IsPrometheusRuleEnabled(clientset kubecli.KubevirtClient) (bool, error) {
_, apis, err := clientset.DiscoveryClient().ServerGroupsAndResources()
if err != nil && !discovery.IsGroupDiscoveryFailedError(err) {
return false, err
}
for _, api := range apis {
if api.GroupVersion == promv1.SchemeGroupVersion.String() {
for _, resource := range api.APIResources {
if resource.Name == "prometheusrules" {
return true, nil
}
}
}
}
return false, nil
}
// IsValidatingAdmissionPolicyBindingEnabled returns true if ValidatingAdmissionPolicyBinding resource is defined
// and false otherwise.
func IsValidatingAdmissionPolicyBindingEnabled(clientset kubecli.KubevirtClient) (bool, error) {
_, apis, err := clientset.DiscoveryClient().ServerGroupsAndResources()
if err != nil && !discovery.IsGroupDiscoveryFailedError(err) {
return false, err
}
for _, api := range apis {
if api.GroupVersion == admissionregistrationv1.SchemeGroupVersion.String() {
for _, resource := range api.APIResources {
if resource.Name == "validatingadmissionpolicybindings" {
return true, nil
}
}
}
}
return false, nil
}
// IsValidatingAdmissionPolicyEnabled returns true if ValidatingAdmissionPolicy resource is defined
// and false otherwise.
func IsValidatingAdmissionPolicyEnabled(clientset kubecli.KubevirtClient) (bool, error) {
_, apis, err := clientset.DiscoveryClient().ServerGroupsAndResources()
if err != nil && !discovery.IsGroupDiscoveryFailedError(err) {
return false, err
}
for _, api := range apis {
if api.GroupVersion == admissionregistrationv1.SchemeGroupVersion.String() {
for _, resource := range api.APIResources {
if resource.Name == "validatingadmissionpolicies" {
return true, nil
}
}
}
}
return false, nil
}
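// The four discovery lookups above share one pattern: list the served API
// resources and search for a group-version/resource pair. A possible
// consolidation could look like the sketch below (not part of this package):
//
//	func isResourceServed(clientset kubecli.KubevirtClient, groupVersion, resourceName string) (bool, error) {
//		_, apis, err := clientset.DiscoveryClient().ServerGroupsAndResources()
//		if err != nil && !discovery.IsGroupDiscoveryFailedError(err) {
//			return false, err
//		}
//		for _, api := range apis {
//			if api.GroupVersion != groupVersion {
//				continue
//			}
//			for _, resource := range api.APIResources {
//				if resource.Name == resourceName {
//					return true, nil
//				}
//			}
//		}
//		return false, nil
//	}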
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package util
import (
// #nosec sha1 used to calculate hash to identify the deployment and not as cryptographic info
"crypto/sha1"
"encoding/hex"
"encoding/json"
"fmt"
"reflect"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
k8sv1 "k8s.io/api/core/v1"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
clientutil "kubevirt.io/client-go/util"
"kubevirt.io/kubevirt/pkg/virt-config/featuregate"
)
const (
// Name of env var containing the operator's image name
// Deprecated. Use VirtOperatorImageEnvName instead
OldOperatorImageEnvName = "OPERATOR_IMAGE"
VirtOperatorImageEnvName = "VIRT_OPERATOR_IMAGE"
VirtApiImageEnvName = "VIRT_API_IMAGE"
VirtControllerImageEnvName = "VIRT_CONTROLLER_IMAGE"
VirtHandlerImageEnvName = "VIRT_HANDLER_IMAGE"
VirtLauncherImageEnvName = "VIRT_LAUNCHER_IMAGE"
VirtExportProxyImageEnvName = "VIRT_EXPORTPROXY_IMAGE"
VirtExportServerImageEnvName = "VIRT_EXPORTSERVER_IMAGE"
VirtSynchronizationControllerImageEnvName = "VIRT_SYNCHRONIZATIONCONTROLLER_IMAGE"
GsImageEnvName = "GS_IMAGE"
PrHelperImageEnvName = "PR_HELPER_IMAGE"
SidecarShimImageEnvName = "SIDECAR_SHIM_IMAGE"
RunbookURLTemplate = "RUNBOOK_URL_TEMPLATE"
KubeVirtVersionEnvName = "KUBEVIRT_VERSION"
// Deprecated, use TargetDeploymentConfig instead
TargetInstallNamespace = "TARGET_INSTALL_NAMESPACE"
// Deprecated, use TargetDeploymentConfig instead
TargetImagePullPolicy = "TARGET_IMAGE_PULL_POLICY"
// JSON containing all relevant deployment properties, replaces TargetInstallNamespace and TargetImagePullPolicy
TargetDeploymentConfig = "TARGET_DEPLOYMENT_CONFIG"
// these names need to match field names from KubeVirt Spec if they are set from there
AdditionalPropertiesNamePullPolicy = "ImagePullPolicy"
AdditionalPropertiesPullSecrets = "ImagePullSecrets"
// lookup key in AdditionalProperties
AdditionalPropertiesMonitorNamespace = "MonitorNamespace"
// lookup key in AdditionalProperties
AdditionalPropertiesServiceMonitorNamespace = "ServiceMonitorNamespace"
// lookup key in AdditionalProperties
AdditionalPropertiesMonitorServiceAccount = "MonitorAccount"
// lookup key in AdditionalProperties
AdditionalPropertiesMigrationNetwork = "MigrationNetwork"
// lookup key in AdditionalProperties
AdditionalPropertiesPersistentReservationEnabled = "PersistentReservationEnabled"
// lookup key in AdditionalProperties
AdditionalPropertiesSynchronizationPort = "SynchronizationPort"
DefaultSynchronizationPort int32 = 9185
// account to use if one is not explicitly named
DefaultMonitorAccount = "prometheus-k8s"
// lookup keys in AdditionalProperties
ImagePrefixKey = "imagePrefix"
ProductNameKey = "productName"
ProductComponentKey = "productComponent"
ProductVersionKey = "productVersion"
// the regex used to parse the operator image
operatorImageRegex = "^(.*)/(.*)virt-operator([@:].*)?$"
// #nosec 101, the variable is not holding any credential
// Prefix for env vars that will be passed along
PassthroughEnvPrefix = "KV_IO_EXTRA_ENV_"
)
// DefaultMonitorNamespaces holds a set of well known prometheus-operator namespaces.
// Ordering in the list matters. First entries have precedence.
var DefaultMonitorNamespaces = []string{
"openshift-monitoring", // default namespace in openshift
"monitoring", // default namespace of https://github.com/prometheus-operator/kube-prometheus
}
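// Illustrative sketch only: because ordering carries precedence, a caller
// choosing a monitor namespace takes the first entry that exists in the
// cluster. namespaceExists is a hypothetical helper for the sketch.
//
//	monitorNamespace := ""
//	for _, ns := range DefaultMonitorNamespaces {
//		if namespaceExists(ns) {
//			monitorNamespace = ns
//			break
//		}
//	}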
type KubeVirtDeploymentConfig struct {
ID string `json:"id,omitempty" optional:"true"`
Namespace string `json:"namespace,omitempty" optional:"true"`
Registry string `json:"registry,omitempty" optional:"true"`
ImagePrefix string `json:"imagePrefix,omitempty" optional:"true"`
// The KubeVirt version. It matches the image tag (if tags are used), set
// either by the manifest or by the KubeVirt CR. It is used on the KubeVirt
// CR status and annotations, and for determining up-/downgrade paths.
KubeVirtVersion string `json:"kubeVirtVersion,omitempty" optional:"true"`
// the image names of every image we use
VirtOperatorImage string `json:"virtOperatorImage,omitempty" optional:"true"`
VirtApiImage string `json:"virtApiImage,omitempty" optional:"true"`
VirtControllerImage string `json:"virtControllerImage,omitempty" optional:"true"`
VirtHandlerImage string `json:"virtHandlerImage,omitempty" optional:"true"`
VirtLauncherImage string `json:"virtLauncherImage,omitempty" optional:"true"`
VirtExportProxyImage string `json:"virtExportProxyImage,omitempty" optional:"true"`
VirtExportServerImage string `json:"virtExportServerImage,omitempty" optional:"true"`
VirtSynchronizationControllerImage string `json:"virtSynchronizationControllerImage,omitempty" optional:"true"`
GsImage string `json:"GsImage,omitempty" optional:"true"`
PrHelperImage string `json:"PrHelperImage,omitempty" optional:"true"`
SidecarShimImage string `json:"SidecarShimImage,omitempty" optional:"true"`
// everything else, which can e.g. come from KubeVirt CR spec
AdditionalProperties map[string]string `json:"additionalProperties,omitempty" optional:"true"`
// environment variables from virt-operator to pass along
PassthroughEnvVars map[string]string `json:"passthroughEnvVars,omitempty" optional:"true"`
}
var DefaultEnvVarManager EnvVarManager = EnvVarManagerImpl{}
func GetConfigFromEnv() (*KubeVirtDeploymentConfig, error) {
return GetConfigFromEnvWithEnvVarManager(DefaultEnvVarManager)
}
func GetConfigFromEnvWithEnvVarManager(envVarManager EnvVarManager) (*KubeVirtDeploymentConfig, error) {
// first check if we have the new deployment config JSON
c := envVarManager.Getenv(TargetDeploymentConfig)
if c != "" {
config := &KubeVirtDeploymentConfig{}
if err := json.Unmarshal([]byte(c), config); err != nil {
return nil, err
}
return config, nil
}
// for backwards compatibility: check for namespace and pull policy from deprecated env vars
ns := envVarManager.Getenv(TargetInstallNamespace)
if ns == "" {
var err error
ns, err = clientutil.GetNamespace()
if err != nil {
return nil, err
}
}
pullPolicy := envVarManager.Getenv(TargetImagePullPolicy)
additionalProperties := make(map[string]string)
additionalProperties[AdditionalPropertiesNamePullPolicy] = pullPolicy
return getConfig("", "", ns, additionalProperties, envVarManager), nil
}
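// Example usage of GetConfigFromEnvWithEnvVarManager (a hypothetical sketch using the
// EnvVarManagerMock defined further below; the JSON value is illustrative, not a complete config):
//
//	mgr := &EnvVarManagerMock{}
//	_ = mgr.Setenv(TargetDeploymentConfig, `{"namespace":"kubevirt","registry":"registry.example.com/kubevirt"}`)
//	config, err := GetConfigFromEnvWithEnvVarManager(mgr)
//	// config.Namespace == "kubevirt", config.Registry == "registry.example.com/kubevirt"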
func GetTargetConfigFromKV(kv *v1.KubeVirt) *KubeVirtDeploymentConfig {
return GetTargetConfigFromKVWithEnvVarManager(kv, DefaultEnvVarManager)
}
func GetTargetConfigFromKVWithEnvVarManager(kv *v1.KubeVirt, envVarManager EnvVarManager) *KubeVirtDeploymentConfig {
additionalProperties := getKVMapFromSpec(kv.Spec)
if kv.Spec.Configuration.MigrationConfiguration != nil &&
kv.Spec.Configuration.MigrationConfiguration.Network != nil {
additionalProperties[AdditionalPropertiesMigrationNetwork] = *kv.Spec.Configuration.MigrationConfiguration.Network
}
if kv.Spec.Configuration.DeveloperConfiguration != nil && len(kv.Spec.Configuration.DeveloperConfiguration.FeatureGates) > 0 {
for _, v := range kv.Spec.Configuration.DeveloperConfiguration.FeatureGates {
if v == featuregate.PersistentReservation {
additionalProperties[AdditionalPropertiesPersistentReservationEnabled] = ""
}
}
}
// don't use status.target* here, as those fields are always set; we need to know whether the values were
// set by the spec, because spec values override the shasums provided via env vars
return getConfig(kv.Spec.ImageRegistry,
kv.Spec.ImageTag,
kv.Namespace,
additionalProperties,
envVarManager)
}
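// Example usage of GetTargetConfigFromKV (illustrative values):
//
//	kv := &v1.KubeVirt{}
//	kv.Namespace = "kubevirt"
//	kv.Spec.ImageRegistry = "registry.example.com/kubevirt"
//	kv.Spec.ImageTag = "v1.2.0"
//	config := GetTargetConfigFromKV(kv)
//	// config.Registry and config.KubeVirtVersion now reflect the CR spec,
//	// overriding the registry and tag parsed from the operator image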
// retrieve imagePrefix from an existing deployment config (which is stored as JSON)
func getImagePrefixFromDeploymentConfig(deploymentConfig string) (string, bool, error) {
obj := map[string]interface{}{}
if err := json.Unmarshal([]byte(deploymentConfig), &obj); err != nil {
return "", false, fmt.Errorf("unable to parse deployment config: %v", err)
}
if value, ok := obj[ImagePrefixKey]; ok {
prefix, isString := value.(string)
if !isString {
return "", false, fmt.Errorf("unable to parse deployment config: %s is not a string", ImagePrefixKey)
}
return prefix, true, nil
}
return "", false, nil
}
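// For example, getImagePrefixFromDeploymentConfig(`{"imagePrefix":"acme-"}`) returns ("acme-", true, nil),
// while a config without the key returns ("", false, nil).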
func getKVMapFromSpec(spec v1.KubeVirtSpec) map[string]string {
kvMap := make(map[string]string)
v := reflect.ValueOf(spec)
for i := 0; i < v.NumField(); i++ {
name := v.Type().Field(i).Name
if name == "ImageTag" || name == "ImageRegistry" {
// these are handled in the root deployment config already
continue
}
if name == "ImagePullSecrets" {
value, err := json.Marshal(v.Field(i).Interface())
if err != nil {
fmt.Printf("Cannot encode ImagePullSecrets to JSON: %v\n", err)
} else {
kvMap[name] = string(value)
}
continue
}
value := v.Field(i).String()
kvMap[name] = value
}
return kvMap
}
func GetOperatorImageWithEnvVarManager(envVarManager EnvVarManager) string {
image := envVarManager.Getenv(VirtOperatorImageEnvName)
if image != "" {
return image
}
return envVarManager.Getenv(OldOperatorImageEnvName)
}
func getImagePrefix(parsedImage [][]string) string {
if len(parsedImage) == 1 {
return parsedImage[0][2]
}
return ""
}
func getImageRegistry(parsedImage [][]string) string {
if len(parsedImage) == 1 {
return parsedImage[0][1]
}
return ""
}
func getTag(parsedImage [][]string, kubeVirtVersion string) string {
if len(parsedImage) != 1 {
return kubeVirtVersion
}
version := parsedImage[0][3]
if version == "" {
return "latest"
} else if strings.HasPrefix(version, ":") {
return strings.TrimPrefix(version, ":")
} else {
// we have a shasum... chances are high that we get the shasums for the other images from env vars as well,
// but as a fallback use the KubeVirt version (which defaults to the "latest" tag)
return kubeVirtVersion
}
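// For illustration (hypothetical image names), operatorImageRegex parses operator images
// into (registry, prefix, tag); a digest reference falls back to the KUBEVIRT_VERSION env value:
//
//	"registry.example.com/kubevirt/virt-operator:v1.2.0"      -> ("registry.example.com/kubevirt", "", "v1.2.0")
//	"registry.example.com/kubevirt/acme-virt-operator:v1.2.0" -> ("registry.example.com/kubevirt", "acme-", "v1.2.0")
//	"registry.example.com/kubevirt/virt-operator"             -> ("registry.example.com/kubevirt", "", "latest")
//	"registry.example.com/kubevirt/virt-operator@sha256:..."  -> ("registry.example.com/kubevirt", "", KUBEVIRT_VERSION)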
}
func getConfig(providedRegistry, providedTag, namespace string, additionalProperties map[string]string, envVarManager EnvVarManager) *KubeVirtDeploymentConfig {
// get registry and tag/shasum from operator image
imageString := GetOperatorImageWithEnvVarManager(envVarManager)
imageRegEx := regexp.MustCompile(operatorImageRegex)
parsedImage := imageRegEx.FindAllStringSubmatch(imageString, 1)
kubeVirtVersion := envVarManager.Getenv(KubeVirtVersionEnvName)
if kubeVirtVersion == "" {
kubeVirtVersion = "latest"
}
imagePrefix, useStoredImagePrefix := additionalProperties[ImagePrefixKey]
if !useStoredImagePrefix {
imagePrefix = getImagePrefix(parsedImage)
}
operatorImage := ""
registry, tag := getImageRegistry(parsedImage), getTag(parsedImage, kubeVirtVersion)
if providedRegistry == "" && providedTag == "" {
operatorImage = imageString
} else {
if providedRegistry != "" {
registry = providedRegistry
}
if providedTag != "" {
tag = providedTag
}
operatorImagePrefix := imagePrefix
if !useStoredImagePrefix {
operatorImagePrefix = ""
}
version := fmt.Sprintf(":%s", tag)
operatorImage = fmt.Sprintf("%s/%s%s%s", registry, operatorImagePrefix, "virt-operator", version)
}
passthroughEnv := GetPassthroughEnv()
apiImage := envVarManager.Getenv(VirtApiImageEnvName)
controllerImage := envVarManager.Getenv(VirtControllerImageEnvName)
handlerImage := envVarManager.Getenv(VirtHandlerImageEnvName)
launcherImage := envVarManager.Getenv(VirtLauncherImageEnvName)
exportProxyImage := envVarManager.Getenv(VirtExportProxyImageEnvName)
exportServerImage := envVarManager.Getenv(VirtExportServerImageEnvName)
synchronizationControllerImage := envVarManager.Getenv(VirtSynchronizationControllerImageEnvName)
GsImage := envVarManager.Getenv(GsImageEnvName)
PrHelperImage := envVarManager.Getenv(PrHelperImageEnvName)
SidecarShimImage := envVarManager.Getenv(SidecarShimImageEnvName)
return newDeploymentConfigWithTag(registry, imagePrefix, tag, namespace, operatorImage, apiImage, controllerImage, handlerImage, launcherImage, exportProxyImage, exportServerImage, synchronizationControllerImage, GsImage, PrHelperImage, SidecarShimImage, additionalProperties, passthroughEnv)
}
func VerifyEnv() error {
return VerifyEnvWithEnvVarManager(DefaultEnvVarManager)
}
func VerifyEnvWithEnvVarManager(envVarManager EnvVarManager) error {
// ensure the operator image is valid
imageString := GetOperatorImageWithEnvVarManager(envVarManager)
if imageString == "" {
return fmt.Errorf("cannot find virt-operator's image")
}
return nil
}
func GetPassthroughEnv() map[string]string {
return GetPassthroughEnvWithEnvVarManager(DefaultEnvVarManager)
}
func GetPassthroughEnvWithEnvVarManager(envVarManager EnvVarManager) map[string]string {
passthroughEnv := map[string]string{}
for _, env := range envVarManager.Environ() {
if strings.HasPrefix(env, PassthroughEnvPrefix) {
// split on the first '=' only, so values containing '=' are preserved
split := strings.SplitN(env, "=", 2)
passthroughEnv[strings.TrimPrefix(split[0], PassthroughEnvPrefix)] = split[1]
}
}
return passthroughEnv
}
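// Example usage of GetPassthroughEnvWithEnvVarManager (illustrative variable and value):
//
//	mgr := &EnvVarManagerMock{}
//	_ = mgr.Setenv("KV_IO_EXTRA_ENV_HTTP_PROXY", "http://proxy.example.com:3128")
//	env := GetPassthroughEnvWithEnvVarManager(mgr)
//	// env == map[string]string{"HTTP_PROXY": "http://proxy.example.com:3128"}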
func newDeploymentConfigWithTag(registry, imagePrefix, tag, namespace, operatorImage, apiImage, controllerImage, handlerImage, launcherImage, exportProxyImage, exportServerImage, synchronizationControllerImage, gsImage, prHelperImage, sidecarShimImage string, kvSpec, passthroughEnv map[string]string) *KubeVirtDeploymentConfig {
c := &KubeVirtDeploymentConfig{
Registry: registry,
ImagePrefix: imagePrefix,
KubeVirtVersion: tag,
VirtOperatorImage: operatorImage,
VirtApiImage: apiImage,
VirtControllerImage: controllerImage,
VirtHandlerImage: handlerImage,
VirtLauncherImage: launcherImage,
VirtExportProxyImage: exportProxyImage,
VirtExportServerImage: exportServerImage,
VirtSynchronizationControllerImage: synchronizationControllerImage,
GsImage: gsImage,
PrHelperImage: prHelperImage,
SidecarShimImage: sidecarShimImage,
Namespace: namespace,
AdditionalProperties: kvSpec,
PassthroughEnvVars: passthroughEnv,
}
c.generateInstallStrategyID()
return c
}
func (c *KubeVirtDeploymentConfig) GetOperatorVersion() string {
if digest := DigestFromImageName(c.VirtOperatorImage); digest != "" {
return digest
}
return c.KubeVirtVersion
}
func (c *KubeVirtDeploymentConfig) GetApiVersion() string {
if digest := DigestFromImageName(c.VirtApiImage); digest != "" {
return digest
}
return c.KubeVirtVersion
}
func (c *KubeVirtDeploymentConfig) GetControllerVersion() string {
if digest := DigestFromImageName(c.VirtControllerImage); digest != "" {
return digest
}
return c.KubeVirtVersion
}
func (c *KubeVirtDeploymentConfig) GetHandlerVersion() string {
if digest := DigestFromImageName(c.VirtHandlerImage); digest != "" {
return digest
}
return c.KubeVirtVersion
}
func (c *KubeVirtDeploymentConfig) GetLauncherVersion() string {
if digest := DigestFromImageName(c.VirtLauncherImage); digest != "" {
return digest
}
return c.KubeVirtVersion
}
func (c *KubeVirtDeploymentConfig) GetExportProxyVersion() string {
if digest := DigestFromImageName(c.VirtExportProxyImage); digest != "" {
return digest
}
return c.KubeVirtVersion
}
func (c *KubeVirtDeploymentConfig) GetSynchronizationControllerVersion() string {
if digest := DigestFromImageName(c.VirtSynchronizationControllerImage); digest != "" {
return digest
}
return c.KubeVirtVersion
}
func (c *KubeVirtDeploymentConfig) GetExportServerVersion() string {
if digest := DigestFromImageName(c.VirtExportServerImage); digest != "" {
return digest
}
return c.KubeVirtVersion
}
func (c *KubeVirtDeploymentConfig) GetPrHelperVersion() string {
return c.KubeVirtVersion
}
func (c *KubeVirtDeploymentConfig) GetSidecarShimVersion() string {
return c.KubeVirtVersion
}
func (c *KubeVirtDeploymentConfig) GetKubeVirtVersion() string {
return c.KubeVirtVersion
}
func (c *KubeVirtDeploymentConfig) GetImageRegistry() string {
return c.Registry
}
func (c *KubeVirtDeploymentConfig) GetImagePrefix() string {
return c.ImagePrefix
}
func (c *KubeVirtDeploymentConfig) GetExtraEnv() map[string]string {
return c.PassthroughEnvVars
}
func (c *KubeVirtDeploymentConfig) SetTargetDeploymentConfig(kv *v1.KubeVirt) error {
kv.Status.TargetKubeVirtVersion = c.GetKubeVirtVersion()
kv.Status.TargetKubeVirtRegistry = c.GetImageRegistry()
kv.Status.TargetDeploymentID = c.GetDeploymentID()
json, err := c.GetJson()
kv.Status.TargetDeploymentConfig = json
return err
}
func (c *KubeVirtDeploymentConfig) SetDefaultArchitecture(kv *v1.KubeVirt) error {
if kv.Spec.Configuration.ArchitectureConfiguration != nil && kv.Spec.Configuration.ArchitectureConfiguration.DefaultArchitecture != "" {
kv.Status.DefaultArchitecture = kv.Spec.Configuration.ArchitectureConfiguration.DefaultArchitecture
} else {
// only set the default architecture in status if it has not already been set
if kv.Status.DefaultArchitecture == "" {
kv.Status.DefaultArchitecture = runtime.GOARCH
}
}
return nil
}
func (c *KubeVirtDeploymentConfig) SetObservedDeploymentConfig(kv *v1.KubeVirt) error {
kv.Status.ObservedKubeVirtVersion = c.GetKubeVirtVersion()
kv.Status.ObservedKubeVirtRegistry = c.GetImageRegistry()
kv.Status.ObservedDeploymentID = c.GetDeploymentID()
json, err := c.GetJson()
kv.Status.ObservedDeploymentConfig = json
return err
}
func (c *KubeVirtDeploymentConfig) GetImagePullPolicy() k8sv1.PullPolicy {
p := c.AdditionalProperties[AdditionalPropertiesNamePullPolicy]
if p != "" {
return k8sv1.PullPolicy(p)
}
return k8sv1.PullIfNotPresent
}
func (c *KubeVirtDeploymentConfig) GetImagePullSecrets() []k8sv1.LocalObjectReference {
var data []k8sv1.LocalObjectReference
s, ok := c.AdditionalProperties[AdditionalPropertiesPullSecrets]
if !ok {
return data
}
if err := json.Unmarshal([]byte(s), &data); err != nil {
fmt.Printf("Unable to parse imagePullSecrets: %v\n", err)
if e, ok := err.(*json.SyntaxError); ok {
fmt.Printf("syntax error at byte offset %d\n", e.Offset)
}
return data
}
return data
}
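// The pull secrets are stored as a JSON-encoded list. For example, setting
// AdditionalProperties[AdditionalPropertiesPullSecrets] to `[{"name":"my-pull-secret"}]`
// (an illustrative value) makes GetImagePullSecrets return a single
// LocalObjectReference named "my-pull-secret".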
func (c *KubeVirtDeploymentConfig) PersistentReservationEnabled() bool {
_, enabled := c.AdditionalProperties[AdditionalPropertiesPersistentReservationEnabled]
return enabled
}
func (c *KubeVirtDeploymentConfig) GetMigrationNetwork() *string {
value, enabled := c.AdditionalProperties[AdditionalPropertiesMigrationNetwork]
if enabled {
return &value
} else {
return nil
}
}
func (c *KubeVirtDeploymentConfig) GetSynchronizationPort() int32 {
value, enabled := c.AdditionalProperties[AdditionalPropertiesSynchronizationPort]
if enabled {
port, err := strconv.Atoi(value)
if err != nil {
log.Log.Errorf("Unable to convert %s to integer", value)
} else {
return int32(port)
}
}
return DefaultSynchronizationPort
}
/*
If the monitoring namespace field is defined in the KubeVirt CR then return it,
otherwise return the common monitoring namespaces.
*/
func (c *KubeVirtDeploymentConfig) GetPotentialMonitorNamespaces() []string {
p := c.AdditionalProperties[AdditionalPropertiesMonitorNamespace]
if p == "" {
return DefaultMonitorNamespaces
}
return []string{p}
}
func (c *KubeVirtDeploymentConfig) GetServiceMonitorNamespace() string {
svcMonitorNs := c.AdditionalProperties[AdditionalPropertiesServiceMonitorNamespace]
return svcMonitorNs
}
func (c *KubeVirtDeploymentConfig) GetMonitorServiceAccountName() string {
p := c.AdditionalProperties[AdditionalPropertiesMonitorServiceAccount]
if p == "" {
return DefaultMonitorAccount
}
return p
}
func (c *KubeVirtDeploymentConfig) GetNamespace() string {
return c.Namespace
}
func (c *KubeVirtDeploymentConfig) GetVerbosity() string {
// not configurable yet
return "2"
}
func (c *KubeVirtDeploymentConfig) GetProductComponent() string {
return c.AdditionalProperties[ProductComponentKey]
}
func (c *KubeVirtDeploymentConfig) GetProductName() string {
return c.AdditionalProperties[ProductNameKey]
}
func (c *KubeVirtDeploymentConfig) GetProductVersion() string {
productVersion, ok := c.AdditionalProperties[ProductVersionKey]
if !ok {
return c.GetKubeVirtVersion()
}
return productVersion
}
func (c *KubeVirtDeploymentConfig) generateInstallStrategyID() {
// We need an id, which identifies a KubeVirt deployment based on version, registry, namespace, and other
// changeable properties from the KubeVirt CR. This will be used for identifying the correct install strategy job
// and configmap
// Calculate a sha over all those properties
// #nosec CWE: 326 - Use of weak cryptographic primitive (http://cwe.mitre.org/data/definitions/326.html)
// reason: sha1 is not used for encryption but for creating a hash value
hasher := sha1.New()
values := getStringFromFields(*c)
hasher.Write([]byte(values))
c.ID = hex.EncodeToString(hasher.Sum(nil))
}
// use KubeVirtDeploymentConfig by value because we modify the additional properties just for computing the ID
func getStringFromFields(c KubeVirtDeploymentConfig) string {
result := ""
// image prefix might be empty. In order to get the same ID for missing and empty, remove an empty one
if prefix, ok := c.AdditionalProperties[ImagePrefixKey]; ok && prefix == "" {
delete(c.AdditionalProperties, ImagePrefixKey)
}
v := reflect.ValueOf(c)
for i := 0; i < v.NumField(); i++ {
fieldName := v.Type().Field(i).Name
result += fieldName
field := v.Field(i)
if field.Type().Kind() == reflect.Map {
keys := field.MapKeys()
nameKeys := make(map[string]reflect.Value, len(keys))
names := make([]string, 0, len(keys))
for _, key := range keys {
name := key.String()
if name == "" {
continue
}
nameKeys[name] = key
names = append(names, name)
}
sort.Strings(names)
for _, name := range names {
key := nameKeys[name]
val := field.MapIndex(key).String()
result += name
result += val
}
} else {
value := v.Field(i).String()
result += value
}
}
return result
}
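// Note on determinism: map keys are sorted before being appended above, and an empty
// imagePrefix entry is removed, so a config with a missing and one with an empty
// AdditionalProperties[ImagePrefixKey] produce the same string and hence the same ID.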
func (c *KubeVirtDeploymentConfig) GetDeploymentID() string {
return c.ID
}
func (c *KubeVirtDeploymentConfig) GetJson() (string, error) {
json, err := json.Marshal(c)
if err != nil {
return "", err
}
return string(json), nil
}
func NewEnvVarMap(envMap map[string]string) *[]k8sv1.EnvVar {
env := []k8sv1.EnvVar{}
for k, v := range envMap {
env = append(env, k8sv1.EnvVar{Name: k, Value: v})
}
return &env
}
func IsValidLabel(label string) bool {
// First and last character must be alphanumeric
// middle characters can be alphanumeric, dot, hyphen, or underscore
// the entire string must not exceed 63 chars
r := regexp.MustCompile(`^([a-z0-9A-Z]([a-z0-9A-Z\-\_\.]{0,61}[a-z0-9A-Z])?)?$`)
return r.Match([]byte(label))
}
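// For example (per the regex in IsValidLabel):
//
//	IsValidLabel("kubevirt.io") == true
//	IsValidLabel("")            == true  // the empty label is allowed
//	IsValidLabel("-kubevirt")   == false // must not start with a hyphen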
func DigestFromImageName(name string) (digest string) {
if strings.Contains(name, "@sha256:") {
digest = strings.Split(name, "@sha256:")[1]
}
return
}
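// For example (illustrative image names):
//
//	DigestFromImageName("registry.example.com/kubevirt/virt-launcher@sha256:0123abcd") == "0123abcd"
//	DigestFromImageName("registry.example.com/kubevirt/virt-launcher:v1.2.0")          == ""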
package util
import (
"fmt"
"os"
)
type EnvVarManager interface {
Getenv(key string) string
Setenv(key, value string) error
Unsetenv(key string) error
Environ() []string
}
type EnvVarManagerImpl struct{}
func (e EnvVarManagerImpl) Getenv(key string) string {
return os.Getenv(key)
}
func (e EnvVarManagerImpl) Setenv(key, value string) error {
return os.Setenv(key, value)
}
func (e EnvVarManagerImpl) Unsetenv(key string) error {
return os.Unsetenv(key)
}
func (e EnvVarManagerImpl) Environ() []string {
return os.Environ()
}
type EnvVarManagerMock struct {
envVars map[string]string
}
func (e *EnvVarManagerMock) Getenv(key string) string {
return e.envVars[key]
}
func (e *EnvVarManagerMock) Setenv(key, value string) error {
if e.envVars == nil {
e.envVars = make(map[string]string)
}
e.envVars[key] = value
return nil
}
func (e *EnvVarManagerMock) Unsetenv(key string) error {
delete(e.envVars, key)
return nil
}
func (e *EnvVarManagerMock) Environ() (ret []string) {
for key, value := range e.envVars {
ret = append(ret, fmt.Sprintf("%s=%s", key, value))
}
return ret
}
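// Example round-trip with the mock (note that Environ iterates a map, so the ordering
// of the returned slice is not deterministic):
//
//	mgr := &EnvVarManagerMock{}
//	_ = mgr.Setenv("FOO", "bar")
//	_ = mgr.Setenv("BAZ", "qux")
//	fmt.Println(len(mgr.Environ())) // 2, entries formatted as "KEY=value"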
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package util
import (
"strings"
appsv1 "k8s.io/api/apps/v1"
k8sv1 "k8s.io/api/core/v1"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/client-go/log"
)
func DaemonsetIsReady(kv *v1.KubeVirt, daemonset *appsv1.DaemonSet, stores Stores) bool {
// ensure we're looking at the latest daemonset from cache
obj, exists, _ := stores.DaemonSetCache.Get(daemonset)
if exists {
daemonset = obj.(*appsv1.DaemonSet)
} else {
// not in cache yet
return false
}
if daemonset.Status.DesiredNumberScheduled == 0 ||
daemonset.Status.DesiredNumberScheduled != daemonset.Status.NumberReady {
log.Log.V(4).Infof("DaemonSet %v not ready yet", daemonset.Name)
return false
}
// cross check that we have 'daemonset.Status.NumberReady' pods with
// the desired version tag. This ensures we wait for rolling update to complete
// before marking the infrastructure as 100% ready.
var podsReady int32
for _, obj := range stores.InfrastructurePodCache.List() {
if pod, ok := obj.(*k8sv1.Pod); ok {
if !podIsRunning(pod) {
continue
} else if !podHasNamePrefix(pod, daemonset.Name) {
continue
}
if !PodIsUpToDate(pod, kv) {
log.Log.Infof("DaemonSet %v waiting for out of date pods to terminate.", daemonset.Name)
return false
}
if PodIsReady(pod) {
podsReady++
}
}
}
if podsReady == 0 {
log.Log.Infof("DaemonSet %v not ready yet. Waiting for all pods to be ready", daemonset.Name)
return false
}
// Misscheduled but up to date daemonset pods will not be evicted unless manually deleted or the daemonset gets updated.
// Don't force the Available condition to false or block the upgrade on up-to-date misscheduled pods.
return podsReady >= daemonset.Status.DesiredNumberScheduled
}
func DeploymentIsReady(kv *v1.KubeVirt, deployment *appsv1.Deployment, stores Stores) bool {
// ensure we're looking at the latest deployment from cache
obj, exists, _ := stores.DeploymentCache.Get(deployment)
if exists {
deployment = obj.(*appsv1.Deployment)
} else {
// not in cache yet
return false
}
if deployment.Status.Replicas == 0 || deployment.Status.ReadyReplicas == 0 {
log.Log.V(4).Infof("Deployment %v not ready yet", deployment.Name)
return false
}
// cross check that we have 'deployment.Status.ReadyReplicas' pods with
// the desired version tag. This ensures we wait for rolling update to complete
// before marking the infrastructure as 100% ready.
var podsReady int32
for _, obj := range stores.InfrastructurePodCache.List() {
if pod, ok := obj.(*k8sv1.Pod); ok {
if !podIsRunning(pod) {
continue
} else if !podHasNamePrefix(pod, deployment.Name) {
continue
}
if !PodIsUpToDate(pod, kv) {
log.Log.Infof("Deployment %v waiting for out of date pods to terminate.", deployment.Name)
return false
}
if PodIsReady(pod) {
podsReady++
}
}
}
if podsReady == 0 {
log.Log.Infof("Deployment %v not ready yet. Waiting for at least one pod to become ready", deployment.Name)
return false
}
return true
}
func DaemonSetIsUpToDate(kv *v1.KubeVirt, daemonSet *appsv1.DaemonSet) bool {
version := kv.Status.TargetKubeVirtVersion
registry := kv.Status.TargetKubeVirtRegistry
id := kv.Status.TargetDeploymentID
return daemonSet.Annotations[v1.InstallStrategyVersionAnnotation] == version &&
daemonSet.Annotations[v1.InstallStrategyRegistryAnnotation] == registry &&
daemonSet.Annotations[v1.InstallStrategyIdentifierAnnotation] == id
}
func podIsRunning(pod *k8sv1.Pod) bool {
return pod.Status.Phase == k8sv1.PodRunning
}
func podHasNamePrefix(pod *k8sv1.Pod, namePrefix string) bool {
return strings.Contains(pod.Name, namePrefix)
}
func PodIsUpToDate(pod *k8sv1.Pod, kv *v1.KubeVirt) bool {
if pod.Annotations == nil {
return false
}
version, ok := pod.Annotations[v1.InstallStrategyVersionAnnotation]
if !ok || version != kv.Status.TargetKubeVirtVersion {
return false
}
imageRegistry, ok := pod.Annotations[v1.InstallStrategyRegistryAnnotation]
if !ok || imageRegistry != kv.Status.TargetKubeVirtRegistry {
return false
}
id, ok := pod.Annotations[v1.InstallStrategyIdentifierAnnotation]
if !ok || id != kv.Status.TargetDeploymentID {
return false
}
return true
}
func PodIsReady(pod *k8sv1.Pod) bool {
if pod.Status.Phase != k8sv1.PodRunning {
return false
}
for _, containerStatus := range pod.Status.ContainerStatuses {
if !containerStatus.Ready {
return false
}
}
return true
}
func PodIsCrashLooping(pod *k8sv1.Pod) bool {
haveContainersCrashed := func(cs []k8sv1.ContainerStatus) bool {
for i := range cs {
if cs[i].State.Terminated != nil ||
cs[i].LastTerminationState.Terminated != nil ||
cs[i].RestartCount > 0 {
return true
}
}
return false
}
return haveContainersCrashed(pod.Status.ContainerStatuses)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package util
import (
secv1 "github.com/openshift/api/security/v1"
"k8s.io/client-go/tools/cache"
v1 "kubevirt.io/api/core/v1"
"kubevirt.io/kubevirt/pkg/controller"
)
type OperatorConfig struct {
IsOnOpenshift bool
ServiceMonitorEnabled bool
PrometheusRulesEnabled bool
ValidatingAdmissionPolicyBindingEnabled bool
ValidatingAdmissionPolicyEnabled bool
}
type Stores struct {
KubeVirtCache cache.Store
ServiceAccountCache cache.Store
ClusterRoleCache cache.Store
ClusterRoleBindingCache cache.Store
RoleCache cache.Store
RoleBindingCache cache.Store
OperatorCrdCache cache.Store
ServiceCache cache.Store
DeploymentCache cache.Store
DaemonSetCache cache.Store
ValidationWebhookCache cache.Store
MutatingWebhookCache cache.Store
APIServiceCache cache.Store
SCCCache cache.Store
RouteCache cache.Store
InstallStrategyConfigMapCache cache.Store
InstallStrategyJobCache cache.Store
InfrastructurePodCache cache.Store
PodDisruptionBudgetCache cache.Store
ServiceMonitorCache cache.Store
NamespaceCache cache.Store
PrometheusRuleCache cache.Store
SecretCache cache.Store
ConfigMapCache cache.Store
ValidatingAdmissionPolicyBindingCache cache.Store
ValidatingAdmissionPolicyCache cache.Store
ClusterInstancetype cache.Store
ClusterPreference cache.Store
}
func (s *Stores) AllEmpty() bool {
return IsStoreEmpty(s.ServiceAccountCache) &&
IsStoreEmpty(s.ClusterRoleCache) &&
IsStoreEmpty(s.ClusterRoleBindingCache) &&
IsStoreEmpty(s.RoleCache) &&
IsStoreEmpty(s.RoleBindingCache) &&
IsStoreEmpty(s.OperatorCrdCache) &&
IsStoreEmpty(s.ServiceCache) &&
IsStoreEmpty(s.DeploymentCache) &&
IsStoreEmpty(s.DaemonSetCache) &&
IsStoreEmpty(s.ValidationWebhookCache) &&
IsStoreEmpty(s.MutatingWebhookCache) &&
IsStoreEmpty(s.APIServiceCache) &&
IsStoreEmpty(s.PodDisruptionBudgetCache) &&
IsSCCStoreEmpty(s.SCCCache) &&
IsStoreEmpty(s.RouteCache) &&
IsStoreEmpty(s.ServiceMonitorCache) &&
IsStoreEmpty(s.PrometheusRuleCache) &&
IsStoreEmpty(s.SecretCache) &&
IsStoreEmpty(s.ConfigMapCache) &&
IsStoreEmpty(s.ValidatingAdmissionPolicyBindingCache) &&
IsStoreEmpty(s.ValidatingAdmissionPolicyCache)
// Don't add InstallStrategyConfigMapCache to this list. The install
// strategies persist even after deletion and updates.
}
func IsStoreEmpty(store cache.Store) bool {
return len(store.ListKeys()) == 0
}
func IsManagedByOperator(labels map[string]string) bool {
if v, ok := labels[v1.ManagedByLabel]; ok && (v == v1.ManagedByLabelOperatorValue || v == v1.ManagedByLabelOperatorOldValue) {
return true
}
return false
}
func IsSCCStoreEmpty(store cache.Store) bool {
cnt := 0
for _, obj := range store.List() {
if s, ok := obj.(*secv1.SecurityContextConstraints); ok && IsManagedByOperator(s.GetLabels()) {
cnt++
}
}
return cnt == 0
}
type Expectations struct {
ServiceAccount *controller.UIDTrackingControllerExpectations
ClusterRole *controller.UIDTrackingControllerExpectations
ClusterRoleBinding *controller.UIDTrackingControllerExpectations
Role *controller.UIDTrackingControllerExpectations
RoleBinding *controller.UIDTrackingControllerExpectations
OperatorCrd *controller.UIDTrackingControllerExpectations
Service *controller.UIDTrackingControllerExpectations
Deployment *controller.UIDTrackingControllerExpectations
DaemonSet *controller.UIDTrackingControllerExpectations
ValidationWebhook *controller.UIDTrackingControllerExpectations
MutatingWebhook *controller.UIDTrackingControllerExpectations
APIService *controller.UIDTrackingControllerExpectations
SCC *controller.UIDTrackingControllerExpectations
Route *controller.UIDTrackingControllerExpectations
InstallStrategyConfigMap *controller.UIDTrackingControllerExpectations
InstallStrategyJob *controller.UIDTrackingControllerExpectations
PodDisruptionBudget *controller.UIDTrackingControllerExpectations
ServiceMonitor *controller.UIDTrackingControllerExpectations
PrometheusRule *controller.UIDTrackingControllerExpectations
Secrets *controller.UIDTrackingControllerExpectations
ConfigMap *controller.UIDTrackingControllerExpectations
ValidatingAdmissionPolicyBinding *controller.UIDTrackingControllerExpectations
ValidatingAdmissionPolicy *controller.UIDTrackingControllerExpectations
}
type Informers struct {
KubeVirt cache.SharedIndexInformer
CRD cache.SharedIndexInformer
ServiceAccount cache.SharedIndexInformer
ClusterRole cache.SharedIndexInformer
ClusterRoleBinding cache.SharedIndexInformer
Role cache.SharedIndexInformer
RoleBinding cache.SharedIndexInformer
OperatorCrd cache.SharedIndexInformer
Service cache.SharedIndexInformer
Deployment cache.SharedIndexInformer
DaemonSet cache.SharedIndexInformer
ValidationWebhook cache.SharedIndexInformer
MutatingWebhook cache.SharedIndexInformer
APIService cache.SharedIndexInformer
SCC cache.SharedIndexInformer
Route cache.SharedIndexInformer
InstallStrategyConfigMap cache.SharedIndexInformer
InstallStrategyJob cache.SharedIndexInformer
InfrastructurePod cache.SharedIndexInformer
PodDisruptionBudget cache.SharedIndexInformer
ServiceMonitor cache.SharedIndexInformer
Namespace cache.SharedIndexInformer
PrometheusRule cache.SharedIndexInformer
Secrets cache.SharedIndexInformer
ConfigMap cache.SharedIndexInformer
ValidatingAdmissionPolicyBinding cache.SharedIndexInformer
ValidatingAdmissionPolicy cache.SharedIndexInformer
ClusterInstancetype cache.SharedIndexInformer
ClusterPreference cache.SharedIndexInformer
Leases cache.SharedIndexInformer
}
func (e *Expectations) DeleteExpectations(key string) {
e.ServiceAccount.DeleteExpectations(key)
e.ClusterRole.DeleteExpectations(key)
e.ClusterRoleBinding.DeleteExpectations(key)
e.Role.DeleteExpectations(key)
e.RoleBinding.DeleteExpectations(key)
e.OperatorCrd.DeleteExpectations(key)
e.Service.DeleteExpectations(key)
e.Deployment.DeleteExpectations(key)
e.DaemonSet.DeleteExpectations(key)
e.ValidationWebhook.DeleteExpectations(key)
e.MutatingWebhook.DeleteExpectations(key)
e.APIService.DeleteExpectations(key)
e.SCC.DeleteExpectations(key)
e.Route.DeleteExpectations(key)
e.InstallStrategyConfigMap.DeleteExpectations(key)
e.InstallStrategyJob.DeleteExpectations(key)
e.PodDisruptionBudget.DeleteExpectations(key)
e.ServiceMonitor.DeleteExpectations(key)
e.PrometheusRule.DeleteExpectations(key)
e.Secrets.DeleteExpectations(key)
e.ConfigMap.DeleteExpectations(key)
e.ValidatingAdmissionPolicyBinding.DeleteExpectations(key)
e.ValidatingAdmissionPolicy.DeleteExpectations(key)
}
func (e *Expectations) ResetExpectations(key string) {
e.ServiceAccount.SetExpectations(key, 0, 0)
e.ClusterRole.SetExpectations(key, 0, 0)
e.ClusterRoleBinding.SetExpectations(key, 0, 0)
e.Role.SetExpectations(key, 0, 0)
e.RoleBinding.SetExpectations(key, 0, 0)
e.OperatorCrd.SetExpectations(key, 0, 0)
e.Service.SetExpectations(key, 0, 0)
e.Deployment.SetExpectations(key, 0, 0)
e.DaemonSet.SetExpectations(key, 0, 0)
e.ValidationWebhook.SetExpectations(key, 0, 0)
e.MutatingWebhook.SetExpectations(key, 0, 0)
e.APIService.SetExpectations(key, 0, 0)
e.SCC.SetExpectations(key, 0, 0)
e.Route.SetExpectations(key, 0, 0)
e.InstallStrategyConfigMap.SetExpectations(key, 0, 0)
e.InstallStrategyJob.SetExpectations(key, 0, 0)
e.PodDisruptionBudget.SetExpectations(key, 0, 0)
e.ServiceMonitor.SetExpectations(key, 0, 0)
e.PrometheusRule.SetExpectations(key, 0, 0)
e.Secrets.SetExpectations(key, 0, 0)
e.ConfigMap.SetExpectations(key, 0, 0)
e.ValidatingAdmissionPolicyBinding.SetExpectations(key, 0, 0)
e.ValidatingAdmissionPolicy.SetExpectations(key, 0, 0)
}
func (e *Expectations) SatisfiedExpectations(key string) bool {
return e.ServiceAccount.SatisfiedExpectations(key) &&
e.ClusterRole.SatisfiedExpectations(key) &&
e.ClusterRoleBinding.SatisfiedExpectations(key) &&
e.Role.SatisfiedExpectations(key) &&
e.RoleBinding.SatisfiedExpectations(key) &&
e.OperatorCrd.SatisfiedExpectations(key) &&
e.Service.SatisfiedExpectations(key) &&
e.Deployment.SatisfiedExpectations(key) &&
e.DaemonSet.SatisfiedExpectations(key) &&
e.ValidationWebhook.SatisfiedExpectations(key) &&
e.MutatingWebhook.SatisfiedExpectations(key) &&
e.APIService.SatisfiedExpectations(key) &&
e.SCC.SatisfiedExpectations(key) &&
e.Route.SatisfiedExpectations(key) &&
e.InstallStrategyConfigMap.SatisfiedExpectations(key) &&
e.InstallStrategyJob.SatisfiedExpectations(key) &&
e.PodDisruptionBudget.SatisfiedExpectations(key) &&
e.ServiceMonitor.SatisfiedExpectations(key) &&
e.PrometheusRule.SatisfiedExpectations(key) &&
e.Secrets.SatisfiedExpectations(key) &&
e.ConfigMap.SatisfiedExpectations(key) &&
e.ValidatingAdmissionPolicyBinding.SatisfiedExpectations(key) &&
e.ValidatingAdmissionPolicy.SatisfiedExpectations(key)
}
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright The KubeVirt Authors.
*
*/
package virtiofs
import (
"fmt"
"path/filepath"
"kubevirt.io/kubevirt/pkg/util"
)
// VirtioFSContainers is the name of an emptyDir volume
var VirtioFSContainers = "virtiofs-containers"
var VirtioFSContainersMountBaseDir = filepath.Join(util.VirtShareDir, VirtioFSContainers)
func VirtioFSSocketPath(volumeName string) string {
socketName := fmt.Sprintf("%s.sock", volumeName)
return filepath.Join(VirtioFSContainersMountBaseDir, socketName)
}
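// For example, assuming util.VirtShareDir resolves to "/var/run/kubevirt",
// VirtioFSSocketPath("shared-data") returns
// "/var/run/kubevirt/virtiofs-containers/shared-data.sock".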
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2018 Red Hat, Inc.
*
*/
package util
import (
"encoding/json"
"io"
"strings"
v1 "kubevirt.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/yaml"
)
func MarshallObject(obj interface{}, writer io.Writer) error {
jsonBytes, err := json.Marshal(obj)
if err != nil {
return err
}
var r unstructured.Unstructured
if err := json.Unmarshal(jsonBytes, &r.Object); err != nil {
return err
}
// remove status and metadata.creationTimestamp
unstructured.RemoveNestedField(r.Object, "metadata", "creationTimestamp")
unstructured.RemoveNestedField(r.Object, "template", "metadata", "creationTimestamp")
unstructured.RemoveNestedField(r.Object, "spec", "template", "metadata", "creationTimestamp")
unstructured.RemoveNestedField(r.Object, "status")
// remove dataSource from PVCs if empty
templates, exists, err := unstructured.NestedSlice(r.Object, "spec", "dataVolumeTemplates")
if err != nil {
return err
}
if exists {
for _, tmpl := range templates {
template := tmpl.(map[string]interface{})
_, exists, err = unstructured.NestedString(template, "spec", "pvc", "dataSource")
if err != nil {
return err
}
if !exists {
unstructured.RemoveNestedField(template, "spec", "pvc", "dataSource")
}
}
unstructured.SetNestedSlice(r.Object, templates, "spec", "dataVolumeTemplates")
}
objects, exists, err := unstructured.NestedSlice(r.Object, "objects")
if err != nil {
return err
}
if exists {
for _, obj := range objects {
object := obj.(map[string]interface{})
kind, exists, _ := unstructured.NestedString(object, "kind")
if exists && kind == "PersistentVolumeClaim" {
_, exists, err = unstructured.NestedString(object, "spec", "dataSource")
if err != nil {
return err
}
if !exists {
unstructured.RemoveNestedField(object, "spec", "dataSource")
}
}
unstructured.RemoveNestedField(object, "status", "startFailure")
}
unstructured.SetNestedSlice(r.Object, objects, "objects")
}
deployments, exists, err := unstructured.NestedSlice(r.Object, "spec", "install", "spec", "deployments")
if err != nil {
return err
}
if exists {
for _, obj := range deployments {
deployment := obj.(map[string]interface{})
unstructured.RemoveNestedField(deployment, "metadata", "creationTimestamp")
unstructured.RemoveNestedField(deployment, "spec", "template", "metadata", "creationTimestamp")
unstructured.RemoveNestedField(deployment, "status")
}
unstructured.SetNestedSlice(r.Object, deployments, "spec", "install", "spec", "deployments")
}
// remove "managed by operator" label...
labels, exists, err := unstructured.NestedMap(r.Object, "metadata", "labels")
if err != nil {
return err
}
if exists {
delete(labels, v1.ManagedByLabel)
unstructured.SetNestedMap(r.Object, labels, "metadata", "labels")
}
jsonBytes, err = json.Marshal(r.Object)
if err != nil {
return err
}
yamlBytes, err := yaml.JSONToYAML(jsonBytes)
if err != nil {
return err
}
// fix templates by removing unneeded single quotes...
s := string(yamlBytes)
s = strings.Replace(s, "'{{", "{{", -1)
s = strings.Replace(s, "}}'", "}}", -1)
// fix double quoted strings by removing unneeded single quotes...
s = strings.Replace(s, " '\"", " \"", -1)
s = strings.Replace(s, "\"'\n", "\"\n", -1)
yamlBytes = []byte(s)
_, err = writer.Write([]byte("---\n"))
if err != nil {
return err
}
_, err = writer.Write(yamlBytes)
if err != nil {
return err
}
return nil
}
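// Example usage of MarshallObject (a minimal sketch; myObject stands for any
// JSON-marshallable API object):
//
//	var buf bytes.Buffer
//	if err := MarshallObject(myObject, &buf); err != nil {
//		return err
//	}
//	// buf now holds a "---"-prefixed YAML document with status and
//	// creationTimestamp fields stripped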