package apparmor
import (
"errors"
"fmt"
"strings"
"github.com/containers/common/pkg/apparmor"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)
// DefaultProfile is the default profile name.
const DefaultProfile = "crio-default"

// Config is the global AppArmor configuration type.
type Config struct {
	enabled        bool   // true when AppArmor is compiled in and enabled on the system
	defaultProfile string // profile name applied when the runtime default is requested
}
// New constructs an AppArmor Config, probing the system once for AppArmor
// availability and seeding the default profile name.
func New() *Config {
	cfg := Config{
		enabled:        apparmor.IsEnabled(),
		defaultProfile: DefaultProfile,
	}

	return &cfg
}
// LoadProfile can be used to load a AppArmor profile from the provided path.
// This method will not fail if AppArmor is disabled.
//
// An empty profile or the default profile name installs the built-in default
// profile; "unconfined" disables confinement; any other name is treated as a
// user-provided profile that must already be loaded in the kernel.
func (c *Config) LoadProfile(profile string) error {
	if !c.IsEnabled() {
		logrus.Info("AppArmor is disabled by the system or at CRI-O build-time")

		return nil
	}

	if profile == v1.DeprecatedAppArmorBetaProfileNameUnconfined {
		logrus.Info("AppArmor profile is unconfined which basically disables it")

		c.defaultProfile = v1.DeprecatedAppArmorBetaProfileNameUnconfined

		return nil
	}

	// Load the default profile
	if profile == "" || profile == DefaultProfile {
		logrus.Infof("Installing default AppArmor profile: %v", DefaultProfile)

		if err := apparmor.InstallDefault(DefaultProfile); err != nil {
			return fmt.Errorf(
				"installing default AppArmor profile %q failed: %w",
				DefaultProfile, err,
			)
		}

		if logrus.IsLevelEnabled(logrus.TraceLevel) {
			// Use a dedicated variable name here; the previous code declared
			// `c, err :=`, shadowing the receiver inside this scope.
			content, err := apparmor.DefaultContent(DefaultProfile)
			if err != nil {
				return fmt.Errorf(
					"retrieving default AppArmor profile %q content failed: %w",
					DefaultProfile, err,
				)
			}

			logrus.Tracef("Default AppArmor profile contents: %s", content)
		}

		c.defaultProfile = DefaultProfile

		return nil
	}

	// Load a custom profile
	logrus.Infof("Assuming user-provided AppArmor profile: %v", profile)

	isLoaded, err := apparmor.IsLoaded(profile)
	if err != nil {
		return fmt.Errorf(
			"checking if AppArmor profile %s is loaded: %w", profile, err,
		)
	}

	if !isLoaded {
		return fmt.Errorf(
			"config provided AppArmor profile %q not loaded", profile,
		)
	}

	c.defaultProfile = profile

	return nil
}
// IsEnabled reports whether AppArmor support is active, i.e. it was compiled
// in via the `apparmor` build tag and the system has it enabled globally.
func (c *Config) IsEnabled() bool {
	return c.enabled
}
// Apply returns the trimmed AppArmor profile to be used and reloads the
// default profile if it is specified.
//
// Historically the AppArmor profile reached CRI via the deprecated
// apparmor_profile field; the newer structured apparmor field supersedes it.
// The CRI client populates both fields to maintain backwards compatibility
// (ref https://github.com/kubernetes/kubernetes/pull/123811), so this method
// processes the new field first and falls back to the deprecated one.
// TODO: Clean off deprecated AppArmorProfile usage.
func (c *Config) Apply(p *runtimeapi.LinuxContainerSecurityContext) (string, error) {
	// Runtime default profile
	if p.GetApparmor() != nil && p.GetApparmor().GetProfileType() == runtimeapi.SecurityProfile_RuntimeDefault {
		return c.defaultProfile, nil
	}
	// No profile at all (new field unset and deprecated field empty), or the
	// deprecated field explicitly asks for the runtime default.
	//nolint:staticcheck // see deprecation TODO above
	if p.GetApparmor() == nil && p.GetApparmorProfile() == "" || p.GetApparmorProfile() == v1.DeprecatedAppArmorBetaProfileRuntimeDefault {
		return c.defaultProfile, nil
	}
	securityProfile := ""
	// Only the deprecated field is set: take its raw value.
	//nolint:staticcheck // see deprecation TODO above
	if p.GetApparmor() == nil && p.GetApparmorProfile() != "" {
		securityProfile = p.GetApparmorProfile()
	}
	// New structured field carrying a localhost profile reference.
	if p.GetApparmor() != nil && p.GetApparmor().GetLocalhostRef() != "" {
		securityProfile = p.GetApparmor().GetLocalhostRef()
	}
	// "unconfined" requested via the deprecated field.
	//nolint:staticcheck // see deprecation TODO above
	if p.GetApparmor() == nil && strings.EqualFold(p.GetApparmorProfile(), v1.DeprecatedAppArmorBetaProfileNameUnconfined) {
		securityProfile = v1.DeprecatedAppArmorBetaProfileNameUnconfined
	}
	// "unconfined" requested via the new structured field.
	if p.GetApparmor() != nil && strings.EqualFold(p.GetApparmor().GetProfileType().String(), v1.DeprecatedAppArmorBetaProfileNameUnconfined) {
		securityProfile = v1.DeprecatedAppArmorBetaProfileNameUnconfined
	}
	// Strip the deprecated "localhost/" prefix if present.
	securityProfile = strings.TrimPrefix(securityProfile, v1.DeprecatedAppArmorBetaProfileNamePrefix)
	if securityProfile == "" {
		return "", errors.New("empty localhost AppArmor profile is forbidden")
	}
	// Re-install the default profile in case it got unloaded in the meantime.
	if securityProfile == DefaultProfile {
		if err := reloadDefaultProfile(); err != nil {
			return "", fmt.Errorf("reloading default profile: %w", err)
		}
	}
	return securityProfile, nil
}
// reloadDefaultProfile ensures the default AppArmor profile is present in the
// kernel, re-installing it if it has been unloaded, and returns an error on
// any failure.
func reloadDefaultProfile() error {
	loaded, err := apparmor.IsLoaded(DefaultProfile)
	if err != nil {
		return fmt.Errorf(
			"checking if default AppArmor profile %s is loaded: %w", DefaultProfile, err,
		)
	}

	if loaded {
		// Nothing to do, the profile is still active.
		return nil
	}

	if err := apparmor.InstallDefault(DefaultProfile); err != nil {
		return fmt.Errorf(
			"installing default AppArmor profile %q failed: %w",
			DefaultProfile, err,
		)
	}

	return nil
}
package apparmor
import (
fuzz "github.com/AdaLogics/go-fuzz-headers"
"os"
)
// FuzzLoadConfig feeds fuzzer-generated bytes to Config.LoadProfile via a
// temporary file. It returns 1 when the input was exercised and 0 when the
// fixture could not be prepared.
func FuzzLoadConfig(data []byte) int {
	c := Config{}
	c.enabled = true

	f := fuzz.NewConsumer(data)

	confBytes, err := f.GetBytes()
	if err != nil {
		return 0
	}

	const path = "apparmor_fuzz.config"

	// os.WriteFile creates, writes and closes the file in one call; the
	// previous manual Create/Write/Close left the handle open on one path
	// and duplicated the Close calls.
	if err := os.WriteFile(path, confBytes, 0o644); err != nil {
		return 0
	}
	defer os.Remove(path)

	c.LoadProfile(path)

	return 1
}
package blockio
import (
"fmt"
"os"
"path/filepath"
"github.com/intel/goresctrl/pkg/blockio"
"github.com/sirupsen/logrus"
"sigs.k8s.io/yaml"
)
// Config holds the blockio configuration state for CRI-O.
type Config struct {
	enabled bool            // true once a config file has been loaded successfully
	reload  bool            // whether the config should be reloaded on demand
	path    string          // cleaned path of the blockio config file
	config  *blockio.Config // parsed goresctrl blockio configuration
}
// New creates a new blockio config instance.
func New() *Config {
	return &Config{config: &blockio.Config{}}
}
// Enabled reports whether blockio has been configured on this system.
func (c *Config) Enabled() bool { return c.enabled }
// SetReload stores the blockio reload option.
func (c *Config) SetReload(reload bool) { c.reload = reload }
// ReloadRequired reports whether the configuration file should be re-read and
// block devices rescanned before use.
func (c *Config) ReloadRequired() bool { return c.reload }
// Reload (re-)reads the configuration file and rescans block devices in the
// system. It is a no-op when no config file path has been set via Load.
// The stored config is only replaced after the new one has been applied.
func (c *Config) Reload() error {
	if c.path == "" {
		return nil
	}

	data, err := os.ReadFile(c.path)
	if err != nil {
		return fmt.Errorf("reading blockio config file failed: %w", err)
	}

	tmpCfg := &blockio.Config{}
	// Pass the pointer directly; the previous code took the address of the
	// pointer (a **Config), which yaml tolerates but is needlessly indirect.
	if err := yaml.Unmarshal(data, tmpCfg); err != nil {
		return fmt.Errorf("parsing blockio config failed: %w", err)
	}

	// force=true triggers a rescan of the block devices in the system.
	if err := blockio.SetConfig(tmpCfg, true); err != nil {
		return fmt.Errorf("configuring blockio failed: %w", err)
	}

	c.config = tmpCfg

	return nil
}
// Load loads and validates blockio config from the given path. An empty path
// leaves blockio unconfigured; otherwise blockio is enabled only when the
// file parses and applies cleanly.
func (c *Config) Load(path string) error {
	// Start from a disabled state; only flip on after a successful load.
	c.enabled, c.path = false, ""

	if path == "" {
		logrus.Info("No blockio config file specified, blockio not configured")

		return nil
	}

	c.path = filepath.Clean(path)

	err := c.Reload()
	if err == nil {
		logrus.Infof("Blockio config successfully loaded from %q", path)

		c.enabled = true
	}

	return err
}
package blockio
import (
fuzz "github.com/AdaLogics/go-fuzz-headers"
"os"
)
// FuzzLoadConfig feeds fuzzer-generated bytes to Config.Load via a temporary
// file. It returns 1 when the input was exercised and 0 when the fixture
// could not be prepared.
func FuzzLoadConfig(data []byte) int {
	c := Config{}
	c.enabled = true

	f := fuzz.NewConsumer(data)

	confBytes, err := f.GetBytes()
	if err != nil {
		return 0
	}

	const path = "blockio_fuzz.config"

	// os.WriteFile creates, writes and closes the file in one call; the
	// previous manual Create/Write/Close left the handle open on one path
	// and duplicated the Close calls.
	if err := os.WriteFile(path, confBytes, 0o644); err != nil {
		return 0
	}
	defer os.Remove(path)

	c.Load(path)

	return 1
}
package capabilities
import (
"fmt"
"strings"
common "github.com/containers/common/pkg/capabilities"
"github.com/sirupsen/logrus"
)
// Capabilities is the default representation for capabilities.
type Capabilities []string

// Default returns the default capabilities as string slice.
func Default() Capabilities {
	defaults := Capabilities{
		"CHOWN",
		"DAC_OVERRIDE",
		"FSETID",
		"FOWNER",
		"SETGID",
		"SETUID",
		"SETPCAP",
		"NET_BIND_SERVICE",
		"KILL",
	}

	return defaults
}
// Validate checks if the provided capabilities are available on the system.
// Each name is upper-cased and prefixed with "CAP_" before validation, and
// the resulting set is logged on success.
func (c Capabilities) Validate() error {
	// Preallocate and avoid naming the loop variable `cap`, which shadowed
	// the builtin in the previous version.
	caps := make(Capabilities, 0, len(c))
	for _, name := range c {
		caps = append(caps, "CAP_"+strings.ToUpper(name))
	}

	if err := common.ValidateCapabilities(caps); err != nil {
		return fmt.Errorf("validating capabilities: %w", err)
	}

	logrus.Infof("Using default capabilities: %s", strings.Join(caps, ", "))

	return nil
}
//go:build linux
package cgmgr
import (
"errors"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/cgroups/manager"
rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"github.com/cri-o/cri-o/internal/config/node"
)
const (
	// CrioPrefix is prepended to every cgroup name CRI-O creates.
	CrioPrefix = "crio"
	// CgroupfsCgroupManager represents cgroupfs native cgroup manager.
	cgroupfsCgroupManager = "cgroupfs"
	// SystemdCgroupManager represents systemd native cgroup manager.
	systemdCgroupManager = "systemd"
	// DefaultCgroupManager is the manager used when none is configured.
	DefaultCgroupManager = systemdCgroupManager
	// these constants define the path and name of the memory max file
	// for v1 and v2 respectively.
	CgroupMemoryPathV1    = "/sys/fs/cgroup/memory"
	cgroupMemoryMaxFileV1 = "memory.limit_in_bytes"
	CgroupMemoryPathV2    = "/sys/fs/cgroup"
	cgroupMemoryMaxFileV2 = "memory.max"
)
// CgroupManager is an interface to interact with cgroups on a node. CRI-O is configured at startup to either use
// systemd or cgroupfs, and the node itself is booted with cgroup v1, or cgroup v2. CgroupManager is an interface for
// the CRI-O server to use cgroups, regardless of how it or the node was configured.
type CgroupManager interface {
	// Name returns the name of the cgroup manager (either cgroupfs or systemd).
	Name() string
	// IsSystemd returns whether it is a systemd cgroup manager.
	IsSystemd() bool
	// ContainerCgroupPath takes arguments sandbox parent cgroup and container ID and returns
	// the cgroup path for that containerID. If parentCgroup is empty, it
	// uses the default parent for that particular manager.
	ContainerCgroupPath(string, string) string
	// ContainerCgroupAbsolutePath takes arguments sandbox parent cgroup and container ID and
	// returns the cgroup path on disk for that containerID. If parentCgroup is empty, it
	// uses the default parent for that particular manager.
	ContainerCgroupAbsolutePath(string, string) (string, error)
	// ContainerCgroupManager takes the cgroup parent, and container ID.
	// It returns the raw libcontainer cgroup manager for that container.
	ContainerCgroupManager(sbParent, containerID string) (cgroups.Manager, error)
	// RemoveContainerCgManager removes the cgroup manager for the container.
	RemoveContainerCgManager(containerID string)
	// ContainerCgroupStats takes the sandbox parent, and container ID.
	// It creates a new cgroup if one does not already exist.
	// It returns the cgroup stats for that container.
	ContainerCgroupStats(sbParent, containerID string) (*CgroupStats, error)
	// SandboxCgroupPath takes the sandbox parent, and sandbox ID, and container minimum memory. It
	// returns the cgroup parent, cgroup path, and error. For systemd cgroups,
	// it also checks there is enough memory in the given cgroup.
	SandboxCgroupPath(string, string, int64) (string, string, error)
	// SandboxCgroupManager takes the cgroup parent, and sandbox ID.
	// It returns the raw libcontainer cgroup manager for that sandbox.
	SandboxCgroupManager(sbParent, sbID string) (cgroups.Manager, error)
	// RemoveSandboxCgroupManager removes the cgroup manager for the sandbox.
	RemoveSandboxCgManager(sbID string)
	// MoveConmonToCgroup takes the container ID, cgroup parent, conmon's cgroup (from the config), conmon's PID, and some customized resources
	// It attempts to move conmon to the correct cgroup, and set the resources for that cgroup.
	// It returns the cgroupfs parent that conmon was put into
	// so that CRI-O can clean the parent cgroup of the newly added conmon once the process terminates (systemd handles this for us).
	MoveConmonToCgroup(cid, cgroupParent, conmonCgroup string, pid int, resources *rspec.LinuxResources) (string, error)
	// CreateSandboxCgroup takes the sandbox parent, and sandbox ID.
	// It creates a new cgroup for that sandbox, which is useful when spoofing an infra container.
	CreateSandboxCgroup(sbParent, containerID string) error
	// RemoveSandboxCgroup takes the sandbox parent, and sandbox ID.
	// It removes the cgroup for that sandbox, which is useful when spoofing an infra container.
	RemoveSandboxCgroup(sbParent, containerID string) error
	// SandboxCgroupStats takes the sandbox parent, and sandbox ID.
	// It creates a new cgroup for that sandbox if it does not already exist.
	// It returns the cgroup stats for that sandbox.
	SandboxCgroupStats(sbParent, sbID string) (*CgroupStats, error)
}
// New creates a new CgroupManager with defaults. It panics if the default
// (systemd) manager cannot be constructed, which indicates a programming
// error since the default name is always valid.
func New() CgroupManager {
	mgr, err := SetCgroupManager(DefaultCgroupManager)
	if err != nil {
		panic(err)
	}

	return mgr
}
// SetCgroupManager takes a string and branches on it to return
// the type of cgroup manager configured.
func SetCgroupManager(cgroupManager string) (CgroupManager, error) {
	switch cgroupManager {
	case systemdCgroupManager:
		return NewSystemdManager(), nil
	case cgroupfsCgroupManager:
		// Pick the memory hierarchy matching the booted cgroup version.
		mgr := &CgroupfsManager{
			memoryPath:    CgroupMemoryPathV2,
			memoryMaxFile: cgroupMemoryMaxFileV2,
		}
		if !node.CgroupIsV2() {
			mgr = &CgroupfsManager{
				memoryPath:    CgroupMemoryPathV1,
				memoryMaxFile: cgroupMemoryMaxFileV1,
				v1CtrCgMgr:    make(map[string]cgroups.Manager),
				v1SbCgMgr:     make(map[string]cgroups.Manager),
			}
		}

		return mgr, nil
	default:
		return nil, fmt.Errorf("invalid cgroup manager: %s", cgroupManager)
	}
}
// verifyCgroupHasEnoughMemory reads the memory limit file of the given slice
// and checks it against containerMinMemory. A missing file only logs a
// warning; an unlimited ("max" or empty) value always passes.
func verifyCgroupHasEnoughMemory(slicePath, memorySubsystemPath, memoryMaxFilename string, containerMinMemory int64) error {
	memoryMaxPath := filepath.Join(memorySubsystemPath, slicePath, memoryMaxFilename)

	fileData, err := os.ReadFile(memoryMaxPath)
	switch {
	case os.IsNotExist(err):
		logrus.Warnf("Failed to find %s at path: %q", memoryMaxFilename, slicePath)

		return nil
	case err != nil:
		return fmt.Errorf("unable to read memory file for cgroups at %s: %w", slicePath, err)
	}

	// Drop the trailing newline before parsing.
	strMemory := strings.TrimRight(string(fileData), "\n")
	if strMemory == "" || strMemory == "max" {
		return nil
	}

	memoryLimit, err := strconv.ParseInt(strMemory, 10, 64)
	if err != nil {
		return fmt.Errorf("error converting cgroup memory value from string to int %q: %w", strMemory, err)
	}

	// Compare with the minimum allowed memory limit.
	if err := VerifyMemoryIsEnough(memoryLimit, containerMinMemory); err != nil {
		return fmt.Errorf("pod %w", err)
	}

	return nil
}
// VerifyMemoryIsEnough verifies that the cgroup memory limit is above a
// specified minimum memory limit. A limit of zero means "unlimited" and
// always passes.
func VerifyMemoryIsEnough(memoryLimit, minMemory int64) error {
	if memoryLimit == 0 || memoryLimit >= minMemory {
		return nil
	}

	return fmt.Errorf("set memory limit %d too low; should be at least %d bytes", memoryLimit, minMemory)
}
// MoveProcessToContainerCgroup moves process to the container cgroup by
// writing commandPid into every existing cgroup hierarchy the container
// process belongs to.
func MoveProcessToContainerCgroup(containerPid, commandPid int) error {
	cgmap, err := cgroups.ParseCgroupFile(fmt.Sprintf("/proc/%d/cgroup", containerPid))
	if err != nil {
		return err
	}

	for controller, cgPath := range cgmap {
		// For cgroups V2, controller is an empty string.
		dir := filepath.Join("/sys/fs/cgroup", controller, cgPath)
		if !cgroups.PathExists(dir) {
			continue
		}

		if err := cgroups.WriteCgroupProc(dir, commandPid); err != nil {
			return err
		}
	}

	return nil
}
// createSandboxCgroup takes the path of the sandbox parent and the desired containerCgroup
// It creates a cgroup through cgroupfs (as opposed to systemd) at the location cgroupRoot/sbParent/containerCgroup.
func createSandboxCgroup(sbParent, containerCgroup string) error {
	cg := &cgroups.Cgroup{
		Name:   containerCgroup,
		Parent: sbParent,
		Resources: &cgroups.Resources{
			// No device cgroup rules are needed for a spoofed infra container.
			SkipDevices: true,
		},
	}

	mgr, err := manager.New(cg)
	if err != nil {
		return err
	}

	// The reasoning for this code is slightly obscure. In situation where CPU load balancing is desired,
	// all cgroups must either have cpuset.sched_load_balance=0 or they should not have an intersecting cpuset
	// with the set that load balancing should be disabled on.
	// When this cgroup is created, it is easiest to set sched_load_balance to 0, especially because there will
	// not be any processes in this cgroup (or else we wouldn't need to call this).
	// Note: this should be done before Apply(-1) below, as Apply contains cpusetCopyIfNeeded(), which will
	// populate the cpuset with the parent's cpuset. However, it will be initialized to sched_load_balance=1
	// which will cause the kernel to move all cpusets out of their isolated sched_domain, causing unnecessary churn.
	if !node.CgroupIsV2() {
		path := mgr.Path("cpuset")
		if path == "" {
			return errors.New("failed to find cpuset for newly created cgroup")
		}

		// Tolerate a pre-existing directory; only a genuine creation failure is fatal.
		if err := os.MkdirAll(path, 0o755); err != nil && !os.IsNotExist(err) {
			return fmt.Errorf("failed to create cpuset for newly created cgroup: %w", err)
		}

		if err := cgroups.WriteFile(path, "cpuset.sched_load_balance", "0"); err != nil {
			return fmt.Errorf("failed to set sched_load_balance cpuset for newly created cgroup: %w", err)
		}
	}

	// Apply(-1) creates the cgroup without adding any process to it.
	return mgr.Apply(-1)
}
func removeSandboxCgroup(sbParent, containerCgroup string) error {
cg := &cgroups.Cgroup{
Name: containerCgroup,
Parent: sbParent,
Resources: &cgroups.Resources{
SkipDevices: true,
},
}
mgr, err := manager.New(cg)
if err != nil {
return err
}
return mgr.Destroy()
}
// containerCgroupPath builds the conventional "crio-<id>" cgroup name for a
// container ID.
func containerCgroupPath(id string) string {
	return fmt.Sprintf("%s-%s", CrioPrefix, id)
}
//go:build linux
package cgmgr
import (
"fmt"
"path"
"path/filepath"
"strings"
"sync"
"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/cgroups/manager"
rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/cri-o/cri-o/internal/config/node"
"github.com/cri-o/cri-o/utils"
)
// CgroupfsManager is the cgroupfs implementation of CgroupManager: it
// manages cgroups by talking to the cgroup filesystem directly instead of
// going through systemd.
type CgroupfsManager struct {
	memoryPath, memoryMaxFile string
	// a map of container ID to cgroup manager for cgroup v1
	// the reason we need this for v1 only is because the cost of creating a cgroup manager for v2 is very low
	// and we don't need to cache it
	v1CtrCgMgr map[string]cgroups.Manager
	// a map of sandbox ID to cgroup manager for cgroup v1
	v1SbCgMgr map[string]cgroups.Manager
	// mutex protects the two v1 manager caches above
	mutex sync.Mutex
}

const (
	// defaultCgroupfsParent is used when no sandbox parent cgroup is given.
	defaultCgroupfsParent = "/crio"
)
// Name returns the name of this cgroup manager implementation (cgroupfs).
func (*CgroupfsManager) Name() string { return cgroupfsCgroupManager }
// IsSystemd reports false: this manager drives cgroupfs, not systemd.
func (*CgroupfsManager) IsSystemd() bool { return false }
// ContainerCgroupPath takes arguments sandbox parent cgroup and container ID
// and returns the cgroup path for that containerID. If parentCgroup is
// empty, it uses the default parent /crio.
func (*CgroupfsManager) ContainerCgroupPath(sbParent, containerID string) string {
	parent := sbParent
	if parent == "" {
		parent = defaultCgroupfsParent
	}

	return filepath.Join("/", parent, containerCgroupPath(containerID))
}
// ContainerCgroupAbsolutePath just calls ContainerCgroupPath,
// because they both return the absolute path.
func (m *CgroupfsManager) ContainerCgroupAbsolutePath(sbParent, containerID string) (string, error) {
	cgPath := m.ContainerCgroupPath(sbParent, containerID)

	return cgPath, nil
}
// ContainerCgroupManager takes the cgroup parent, and container ID.
// It returns the raw libcontainer cgroup manager for that container,
// consulting (and, on v1, populating) the per-container cache.
func (m *CgroupfsManager) ContainerCgroupManager(sbParent, containerID string) (cgroups.Manager, error) {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	cgroupV1 := !node.CgroupIsV2()
	if cgroupV1 {
		if cached, ok := m.v1CtrCgMgr[containerID]; ok {
			return cached, nil
		}
	}

	cgPath, err := m.ContainerCgroupAbsolutePath(sbParent, containerID)
	if err != nil {
		return nil, err
	}

	mgr, err := libctrManager(filepath.Base(cgPath), filepath.Dir(cgPath), false)
	if err != nil {
		return nil, err
	}

	if cgroupV1 {
		// Cache only cgroup v1 managers; v2 managers are cheap to rebuild.
		m.v1CtrCgMgr[containerID] = mgr
	}

	return mgr, nil
}
// ContainerCgroupStats takes the sandbox parent, and container ID.
// It creates a new cgroup if one does not already exist.
// It returns the cgroup stats for that container.
func (m *CgroupfsManager) ContainerCgroupStats(sbParent, containerID string) (*CgroupStats, error) {
	mgr, err := m.ContainerCgroupManager(sbParent, containerID)
	if err != nil {
		return nil, err
	}

	return statsFromLibctrMgr(mgr)
}
// RemoveContainerCgManager drops the cached cgroup manager for the container.
// On cgroup v2 nothing is cached, so there is nothing to remove.
func (m *CgroupfsManager) RemoveContainerCgManager(containerID string) {
	if node.CgroupIsV2() {
		return
	}

	m.mutex.Lock()
	defer m.mutex.Unlock()

	delete(m.v1CtrCgMgr, containerID)
}
// SandboxCgroupPath takes the sandbox parent, sandbox ID, and container
// minimum memory. It returns the cgroup parent, cgroup path, and error.
// It rejects systemd-style slice parents and checks the parent cgroup has
// enough memory for the container.
func (m *CgroupfsManager) SandboxCgroupPath(sbParent, sbID string, containerMinMemory int64) (cgParent, cgPath string, _ error) {
	if strings.HasSuffix(path.Base(sbParent), ".slice") {
		return "", "", fmt.Errorf("cri-o configured with cgroupfs cgroup manager, but received systemd slice as parent: %s", sbParent)
	}

	err := verifyCgroupHasEnoughMemory(sbParent, m.memoryPath, m.memoryMaxFile, containerMinMemory)
	if err != nil {
		return "", "", err
	}

	return sbParent, filepath.Join(sbParent, containerCgroupPath(sbID)), nil
}
// SandboxCgroupManager takes the cgroup parent, and sandbox ID.
// It returns the raw libcontainer cgroup manager for that sandbox,
// consulting (and, on v1, populating) the per-sandbox cache.
func (m *CgroupfsManager) SandboxCgroupManager(sbParent, sbID string) (cgroups.Manager, error) {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	cgroupV1 := !node.CgroupIsV2()
	if cgroupV1 {
		if cached, ok := m.v1SbCgMgr[sbID]; ok {
			return cached, nil
		}
	}

	_, cgPath, err := sandboxCgroupAbsolutePath(sbParent)
	if err != nil {
		return nil, err
	}

	mgr, err := libctrManager(filepath.Base(cgPath), filepath.Dir(cgPath), false)
	if err != nil {
		return nil, err
	}

	if cgroupV1 {
		// Cache only cgroup v1 managers; v2 managers are cheap to rebuild.
		m.v1SbCgMgr[sbID] = mgr
	}

	return mgr, nil
}
// SandboxCgroupStats takes the sandbox parent, and sandbox ID.
// It creates a new cgroup for that sandbox if it does not already exist.
// It returns the cgroup stats for that sandbox.
func (m *CgroupfsManager) SandboxCgroupStats(sbParent, sbID string) (*CgroupStats, error) {
	mgr, err := m.SandboxCgroupManager(sbParent, sbID)
	if err != nil {
		return nil, err
	}

	return statsFromLibctrMgr(mgr)
}
// RemoveSandboxCgManager drops the cached cgroup manager for the sandbox.
// On cgroup v2 nothing is cached, so there is nothing to remove.
func (m *CgroupfsManager) RemoveSandboxCgManager(sbID string) {
	if node.CgroupIsV2() {
		return
	}

	m.mutex.Lock()
	defer m.mutex.Unlock()

	delete(m.v1SbCgMgr, sbID)
}
// MoveConmonToCgroup takes the container ID, cgroup parent, conmon's cgroup
// (from the config) and conmon's PID. It attempts to move conmon to the
// correct cgroup and applies the given resources to it.
// It returns the cgroupfs parent that conmon was put into so that CRI-O can
// clean that cgroup once the process terminates (systemd handles this for us).
func (*CgroupfsManager) MoveConmonToCgroup(cid, cgroupParent, conmonCgroup string, pid int, resources *rspec.LinuxResources) (cgroupPathToClean string, _ error) {
	switch conmonCgroup {
	case utils.PodCgroupName, "":
		// Accepted values for the cgroupfs manager.
	default:
		return "", fmt.Errorf("conmon cgroup %s invalid for cgroupfs", conmonCgroup)
	}

	if resources == nil {
		resources = &rspec.LinuxResources{}
	}

	cgroupPath := cgroupParent + "/crio-conmon-" + cid

	return cgroupPath, applyWorkloadSettings(cgroupPath, resources, pid)
}
// applyWorkloadSettings creates a cgroupfs cgroup at cgPath with the CPU
// settings from resources and moves pid into it. It is a no-op when no CPU
// resources are requested.
func applyWorkloadSettings(cgPath string, resources *rspec.LinuxResources, pid int) error {
	cpu := resources.CPU
	if cpu == nil {
		return nil
	}

	cg := &cgroups.Cgroup{
		Path: "/" + cgPath,
		Resources: &cgroups.Resources{
			SkipDevices: true,
			CpusetCpus:  cpu.Cpus,
		},
		Rootless: unshare.IsRootless(),
	}

	// Copy over only the CPU knobs that were explicitly set.
	if cpu.Shares != nil {
		cg.CpuShares = *cpu.Shares
	}

	if cpu.Quota != nil {
		cg.CpuQuota = *cpu.Quota
	}

	if cpu.Period != nil {
		cg.CpuPeriod = *cpu.Period
	}

	mgr, err := manager.New(cg)
	if err != nil {
		return err
	}

	if err := mgr.Set(cg.Resources); err != nil {
		return err
	}

	if err := mgr.Apply(pid); err != nil {
		return fmt.Errorf("failed to add conmon to cgroupfs sandbox cgroup: %w", err)
	}

	return nil
}
// CreateSandboxCgroup calls the helper function createSandboxCgroup for this manager.
func (m *CgroupfsManager) CreateSandboxCgroup(sbParent, containerID string) error {
	// Prepending "/" makes the fs driver treat sbParent as absolute, so the
	// cgroup is not created relative to the cgroups of the CRI-O process.
	// https://github.com/opencontainers/runc/blob/fd5debf3aa/libcontainer/cgroups/fs/paths.go#L156
	absParent := filepath.Join("/", sbParent)

	return createSandboxCgroup(absParent, containerCgroupPath(containerID))
}
// RemoveSandboxCgroup calls the helper function removeSandboxCgroup for this manager.
func (m *CgroupfsManager) RemoveSandboxCgroup(sbParent, containerID string) error {
	// Prepending "/" makes the fs driver treat sbParent as absolute, so the
	// cgroup is not resolved relative to the cgroups of the CRI-O process.
	// https://github.com/opencontainers/runc/blob/fd5debf3aa/libcontainer/cgroups/fs/paths.go#L156
	absParent := filepath.Join("/", sbParent)

	return removeSandboxCgroup(absParent, containerCgroupPath(containerID))
}
package cgmgr
import (
"context"
"math"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/cgroups/manager"
"github.com/cri-o/cri-o/internal/config/node"
"github.com/cri-o/cri-o/internal/log"
)
// CgroupStats is a universal stats object to be used across different runtime implementations.
// We could have used the libcontainer/cgroups.Stats object as a standard stats object for cri-o.
// But due to its incompatibility with non-linux platforms,
// we have to create our own object that can be moved around regardless of the runtime.
type CgroupStats struct {
	Memory     *MemoryStats
	CPU        *CPUStats
	Hugetlb    map[string]HugetlbStats // keyed by page size, e.g. "2MB"
	Pid        *PidsStats
	DiskIO     *DiskIOStats
	SystemNano int64 // UnixNano timestamp taken when the stats were collected
}

// MemoryStats holds normalized memory counters for a cgroup (v1 or v2).
type MemoryStats struct {
	Usage           uint64
	Cache           uint64
	Limit           uint64
	MaxUsage        uint64
	WorkingSetBytes uint64
	RssBytes        uint64
	PageFaults      uint64
	MajorPageFaults uint64
	AvailableBytes  uint64
	KernelUsage     uint64
	KernelTCPUsage  uint64
	SwapUsage       uint64
	SwapLimit       uint64
	// Amount of cached filesystem data mapped with mmap().
	FileMapped uint64
	// The number of memory usage hits limits. For cgroup v1 only.
	Failcnt uint64
}

// CPUStats holds CPU usage and throttling counters for a cgroup.
type CPUStats struct {
	TotalUsageNano uint64
	PerCPUUsage    []uint64
	// Time spent by tasks of the cgroup in kernel mode in nanoseconds.
	UsageInKernelmode uint64
	// Time spent by tasks of the cgroup in user mode in nanoseconds.
	UsageInUsermode uint64
	// Number of periods with throttling active
	ThrottlingActivePeriods uint64
	// Number of periods when the container hit its throttling limit.
	ThrottledPeriods uint64
	// Aggregate time the container was throttled for in nanoseconds.
	ThrottledTime uint64
}

// HugetlbStats holds hugepage usage for a single page size.
type HugetlbStats struct {
	Usage uint64
	Max   uint64
}

// PidsStats holds process/thread counters plus per-process derived counts.
type PidsStats struct {
	Current         uint64
	Limit           uint64
	Pids            []int
	FileDescriptors uint64
	Sockets         uint64
	Threads         uint64
	ThreadsMax      uint64
	UlimitsSoft     uint64
}

// DiskIOStats holds recursive block-IO counters for a cgroup.
type DiskIOStats struct {
	IoServiceBytes []cgroups.BlkioStatEntry
	IoServiced     []cgroups.BlkioStatEntry
}
// MemLimitGivenSystem returns the effective memory limit for a given cgroup
// limit: if the configured limit is larger than the total memory on the
// system, the physical system memory size is returned instead.
func MemLimitGivenSystem(cgroupLimit uint64) uint64 {
	var si syscall.Sysinfo_t
	if err := syscall.Sysinfo(&si); err != nil {
		// Cannot determine physical memory; trust the cgroup limit.
		return cgroupLimit
	}

	// conversion to uint64 needed to build on 32-bit
	// but lint complains about unnecessary conversion
	// see: pr#2409
	physicalLimit := uint64(si.Totalram) //nolint:unconvert

	if cgroupLimit <= physicalLimit {
		return cgroupLimit
	}

	return physicalLimit
}
// libctrManager builds a raw libcontainer cgroup manager for the named
// cgroup under parent, using either the systemd or the fs driver.
func libctrManager(cgroup, parent string, systemd bool) (cgroups.Manager, error) {
	if systemd {
		parent = filepath.Base(parent)
		if parent == "." {
			// libcontainer shorthand for root
			// see https://github.com/opencontainers/runc/blob/9fffadae8/libcontainer/cgroups/systemd/common.go#L71
			parent = "-.slice"
		}
	}

	return manager.New(&cgroups.Cgroup{
		Name:      cgroup,
		Parent:    parent,
		Resources: &cgroups.Resources{SkipDevices: true},
		Systemd:   systemd,
		// If the cgroup manager is systemd, then libcontainer
		// will construct the cgroup path (for scopes) as:
		// ScopePrefix-Name.scope. For slices, and for cgroupfs manager,
		// this will be ignored.
		// See: https://github.com/opencontainers/runc/tree/main/libcontainer/cgroups/systemd/common.go:getUnitName
		ScopePrefix: CrioPrefix,
	})
}
// statsFromLibctrMgr collects stats and PIDs from a libcontainer cgroup
// manager and converts them into CRI-O's runtime-agnostic CgroupStats.
func statsFromLibctrMgr(cgMgr cgroups.Manager) (*CgroupStats, error) {
	libctrStats, err := cgMgr.GetStats()
	if err != nil {
		return nil, err
	}

	pids, err := cgMgr.GetPids()
	if err != nil {
		return nil, err
	}

	result := &CgroupStats{
		Memory:  cgroupMemStats(&libctrStats.MemoryStats),
		CPU:     cgroupCPUStats(&libctrStats.CpuStats),
		Hugetlb: cgroupHugetlbStats(libctrStats.HugetlbStats),
		Pid:     cgroupPidStats(libctrStats, pids),
		DiskIO: &DiskIOStats{
			IoServiced:     libctrStats.BlkioStats.IoServicedRecursive,
			IoServiceBytes: libctrStats.BlkioStats.IoServiceBytesRecursive,
		},
		SystemNano: time.Now().UnixNano(),
	}

	return result, nil
}
// cgroupMemStats converts libcontainer memory stats into CRI-O's MemoryStats,
// normalizing the naming and accounting differences between cgroup v1 and v2.
func cgroupMemStats(memStats *cgroups.MemoryStats) *MemoryStats {
	var (
		workingSetBytes  uint64
		rssBytes         uint64
		pageFaults       uint64
		majorPageFaults  uint64
		usageBytes       uint64
		availableBytes   uint64
		inactiveFileName string
		memSwap          uint64
		fileMapped       uint64
		failcnt          uint64
	)

	usageBytes = memStats.Usage.Usage

	if node.CgroupIsV2() {
		// Use anon for rssBytes for cgroup v2 as in cAdvisor
		// See: https://github.com/google/cadvisor/blob/786dbcfdf5b1aae8341b47e71ab115066a9b4c06/container/libcontainer/handler.go#L809
		rssBytes = memStats.Stats["anon"]
		inactiveFileName = "inactive_file"
		pageFaults = memStats.Stats["pgfault"]
		majorPageFaults = memStats.Stats["pgmajfault"]
		fileMapped = memStats.Stats["file_mapped"]
		// libcontainer adds memory.swap.current to memory.current and reports them as SwapUsage to be compatible with cgroup v1,
		// because cgroup v1 reports SwapUsage as mem+swap combined.
		// Here we subtract SwapUsage from memory usage to get the actual swap value.
		memSwap = memStats.SwapUsage.Usage - usageBytes
	} else {
		inactiveFileName = "total_inactive_file"
		rssBytes = memStats.Stats["total_rss"]
		memSwap = memStats.SwapUsage.Usage
		fileMapped = memStats.Stats["mapped_file"]
		// With hierarchical accounting, the "total_" counters include children.
		if memStats.UseHierarchy {
			fileMapped = memStats.Stats["total_mapped_file"]
		}
		// cgroup v1 doesn't have equivalent stats for pgfault and pgmajfault
		failcnt = memStats.Usage.Failcnt
	}

	// Working set = usage minus inactive file cache, clamped at zero.
	workingSetBytes = usageBytes
	if v, ok := memStats.Stats[inactiveFileName]; ok {
		if workingSetBytes < v {
			workingSetBytes = 0
		} else {
			workingSetBytes -= v
		}
	}

	if !isMemoryUnlimited(memStats.Usage.Limit) {
		// https://github.com/kubernetes/kubernetes/blob/94f15bbbcbe952762b7f5e6e3f77d86ecec7d7c2/pkg/kubelet/stats/helper.go#L69
		availableBytes = memStats.Usage.Limit - workingSetBytes
	}

	return &MemoryStats{
		Usage:           usageBytes,
		Cache:           memStats.Cache,
		Limit:           memStats.Usage.Limit,
		MaxUsage:        memStats.Usage.MaxUsage,
		WorkingSetBytes: workingSetBytes,
		RssBytes:        rssBytes,
		PageFaults:      pageFaults,
		MajorPageFaults: majorPageFaults,
		AvailableBytes:  availableBytes,
		KernelUsage:     memStats.KernelUsage.Usage,
		KernelTCPUsage:  memStats.KernelTCPUsage.Usage,
		SwapUsage:       memSwap,
		SwapLimit:       memStats.SwapUsage.Limit,
		FileMapped:      fileMapped,
		Failcnt:         failcnt,
	}
}
// cgroupCPUStats maps libcontainer CPU usage and throttling counters onto
// CRI-O's CPUStats.
func cgroupCPUStats(cpuStats *cgroups.CpuStats) *CPUStats {
	usage := cpuStats.CpuUsage
	throttling := cpuStats.ThrottlingData

	return &CPUStats{
		TotalUsageNano:          usage.TotalUsage,
		PerCPUUsage:             usage.PercpuUsage,
		UsageInKernelmode:       usage.UsageInKernelmode,
		UsageInUsermode:         usage.UsageInUsermode,
		ThrottlingActivePeriods: throttling.Periods,
		ThrottledPeriods:        throttling.ThrottledPeriods,
		ThrottledTime:           throttling.ThrottledTime,
	}
}
// cgroupHugetlbStats converts the per-pagesize libcontainer hugetlb
// statistics into the internal HugetlbStats map.
func cgroupHugetlbStats(cgHugetlbStats map[string]cgroups.HugetlbStats) map[string]HugetlbStats {
	result := make(map[string]HugetlbStats, len(cgHugetlbStats))
	for pagesize, stats := range cgHugetlbStats {
		result[pagesize] = HugetlbStats{
			Usage: stats.Usage,
			Max:   stats.MaxUsage,
		}
	}

	return result
}
func isMemoryUnlimited(v uint64) bool {
// if the container has unlimited memory, the value of memory.max (in cgroupv2) will be "max"
// or the value of memory.limit_in_bytes (in cgroupv1) will be -1
// either way, libcontainer/cgroups will return math.MaxUint64
return v == math.MaxUint64
}
// cgroupPidStats assembles PID-related statistics for a cgroup, adding
// per-process file-descriptor, socket, and soft-ulimit counts gathered
// from /proc for each given pid.
// This is based on the cadvisor handler: https://github.com/google/cadvisor/blob/master/container/libcontainer/handler.go
func cgroupPidStats(stats *cgroups.Stats, pids []int) *PidsStats {
	var (
		openFDs     uint64
		openSockets uint64
		softUlimit  uint64
	)

	for _, pid := range pids {
		addFdsForProcess(pid, &openFDs, &openSockets)
		addUlimitsForProcess(pid, &softUlimit)
	}

	return &PidsStats{
		Current:         stats.PidsStats.Current,
		Limit:           stats.PidsStats.Limit,
		Pids:            pids,
		FileDescriptors: openFDs,
		Sockets:         openSockets,
		Threads:         stats.PidsStats.Current,
		ThreadsMax:      stats.PidsStats.Limit,
		UlimitsSoft:     softUlimit,
	}
}
// addFdsForProcess adds the number of open file descriptors of pid to
// *fdCount and the number of those that are sockets to *socketCount.
// Read errors are logged and leave the counters unchanged for that entry.
func addFdsForProcess(pid int, fdCount, socketCount *uint64) {
	if fdCount == nil || socketCount == nil {
		panic("Programming error: fdCount or socketCount should not be nil")
	}

	fdDir := path.Join("/proc", strconv.Itoa(pid), "fd")

	entries, err := os.ReadDir(fdDir)
	if err != nil {
		log.Infof(context.Background(), "error while listing directory %q to measure fd count: %v", fdDir, err)

		return
	}

	*fdCount += uint64(len(entries))

	for _, entry := range entries {
		link := path.Join(fdDir, entry.Name())

		target, err := os.Readlink(link)
		if err != nil {
			log.Infof(context.Background(), "error while reading %q link: %v", link, err)

			continue
		}
		// Socket fds resolve to a "socket:[inode]" pseudo-link.
		if strings.HasPrefix(target, "socket") {
			*socketCount++
		}
	}
}
// addUlimitsForProcess reads /proc/<pid>/limits and stores the soft
// "Max open files" limit of the process in *limits.
// Read or parse failures are logged and leave *limits untouched.
//
// Fix: the "Max open files" literal was previously duplicated (once in
// the HasPrefix call, once as a local const inside the loop); it is now
// a single constant used via strings.CutPrefix.
func addUlimitsForProcess(pid int, limits *uint64) {
	if limits == nil {
		panic("Programming error: limits should not be nil")
	}

	const maxOpenFilesPrefix = "Max open files"

	limitsPath := path.Join("/proc", strconv.Itoa(pid), "limits")

	limitsData, err := os.ReadFile(limitsPath)
	if err != nil {
		log.Infof(context.Background(), "error while reading %q to get thread limits: %v", limitsPath, err)

		return
	}

	for line := range strings.SplitSeq(string(limitsData), "\n") {
		remainingLine, found := strings.CutPrefix(line, maxOpenFilesPrefix)
		if !found {
			continue
		}
		// The remainder of the line is "<soft> <hard> files"; the first
		// field is the soft limit.
		fields := strings.Fields(strings.TrimSpace(remainingLine))
		if len(fields) >= 1 {
			if softLimit, err := strconv.ParseUint(fields[0], 10, 64); err == nil {
				*limits = softLimit
			}
		}

		return
	}
}
//go:build linux
package cgmgr
import (
"fmt"
"path"
"path/filepath"
"strings"
"sync"
"github.com/containers/storage/pkg/unshare"
systemdDbus "github.com/coreos/go-systemd/v22/dbus"
"github.com/godbus/dbus/v5"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/cgroups/systemd"
rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
"github.com/cri-o/cri-o/internal/config/node"
"github.com/cri-o/cri-o/internal/dbusmgr"
"github.com/cri-o/cri-o/utils"
)
// defaultSystemdParent is the systemd slice used when callers do not
// provide a sandbox parent cgroup.
const defaultSystemdParent = "system.slice"

// SystemdManager is the parent type of SystemdV{1,2}Manager.
// it defines all of the common functionality between V1 and V2.
type SystemdManager struct {
	// memoryPath and memoryMaxFile select the cgroup memory hierarchy and
	// limit file for the detected cgroup version (set in NewSystemdManager).
	memoryPath, memoryMaxFile string
	// a map of container ID to cgroup manager for cgroup v1
	// the reason we need this for v1 only is because the cost of creating a cgroup manager for v2 is very low
	// therefore, we don't need to cache it
	v1CtrCgMgr map[string]cgroups.Manager
	// a map of sandbox ID to cgroup manager for cgroup v1
	v1SbCgMgr map[string]cgroups.Manager
	// dbusMgr holds the dbus connection used to talk to systemd.
	dbusMgr *dbusmgr.DbusConnManager
	// mutex guards the v1 manager caches above.
	mutex sync.Mutex
}
// NewSystemdManager creates a SystemdManager, choosing the memory
// hierarchy paths for the detected cgroup version and, on cgroup v1,
// initializing the per-container and per-sandbox manager caches.
func NewSystemdManager() *SystemdManager {
	mgr := &SystemdManager{
		dbusMgr: dbusmgr.NewDbusConnManager(unshare.IsRootless()),
	}

	if node.CgroupIsV2() {
		mgr.memoryPath = CgroupMemoryPathV2
		mgr.memoryMaxFile = cgroupMemoryMaxFileV2
	} else {
		mgr.memoryPath = CgroupMemoryPathV1
		mgr.memoryMaxFile = cgroupMemoryMaxFileV1
		mgr.v1CtrCgMgr = make(map[string]cgroups.Manager)
		mgr.v1SbCgMgr = make(map[string]cgroups.Manager)
	}

	return mgr
}
// Name returns the name of the cgroup manager (systemd).
func (*SystemdManager) Name() string {
	// systemdCgroupManager is a package-level constant naming this driver.
	return systemdCgroupManager
}
// IsSystemd returns that it is a systemd cgroup manager.
// Always true for this type.
func (*SystemdManager) IsSystemd() bool {
	return true
}
// ContainerCgroupPath takes arguments sandbox parent cgroup and container ID and returns
// the cgroup path for that containerID. If parentCgroup is empty, it
// uses the default parent system.slice.
func (*SystemdManager) ContainerCgroupPath(sbParent, containerID string) string {
	parent := sbParent
	if parent == "" {
		parent = defaultSystemdParent
	}

	return parent + ":" + CrioPrefix + ":" + containerID
}
// ContainerCgroupAbsolutePath expands the (possibly defaulted) sandbox
// parent slice and returns the absolute cgroupfs path of the container's
// systemd scope.
func (m *SystemdManager) ContainerCgroupAbsolutePath(sbParent, containerID string) (string, error) {
	parent := sbParent
	if parent == "" {
		parent = defaultSystemdParent
	}

	logrus.Debugf("Expanding systemd cgroup slice %v", parent)

	expanded, err := systemd.ExpandSlice(parent)
	if err != nil {
		return "", fmt.Errorf("expanding systemd slice to get container %s stats: %w", containerID, err)
	}

	return filepath.Join(expanded, containerCgroupPath(containerID)+".scope"), nil
}
// ContainerCgroupManager takes the cgroup parent, and container ID.
// It returns the raw libcontainer cgroup manager for that container.
// On cgroup v1 the manager is cached per container ID.
func (m *SystemdManager) ContainerCgroupManager(sbParent, containerID string) (cgroups.Manager, error) {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	if !node.CgroupIsV2() {
		if cached, ok := m.v1CtrCgMgr[containerID]; ok {
			return cached, nil
		}
	}

	cgPath, err := m.ContainerCgroupAbsolutePath(sbParent, containerID)
	if err != nil {
		return nil, err
	}

	// Due to a quirk of libcontainer's cgroup driver, cgroup name = containerID
	mgr, err := libctrManager(containerID, filepath.Dir(cgPath), true)
	if err != nil {
		return nil, err
	}

	if !node.CgroupIsV2() {
		// cache only cgroup v1 managers
		m.v1CtrCgMgr[containerID] = mgr
	}

	return mgr, nil
}
// ContainerCgroupStats takes the sandbox parent, and container ID.
// It creates a new cgroup if one does not already exist.
// It returns the cgroup stats for that container.
func (m *SystemdManager) ContainerCgroupStats(sbParent, containerID string) (*CgroupStats, error) {
	mgr, err := m.ContainerCgroupManager(sbParent, containerID)
	if err != nil {
		return nil, err
	}

	return statsFromLibctrMgr(mgr)
}
// RemoveContainerCgManager removes the cgroup manager for the container.
// Only needed on cgroup v1, where managers are cached.
func (m *SystemdManager) RemoveContainerCgManager(containerID string) {
	if node.CgroupIsV2() {
		return
	}

	m.mutex.Lock()
	defer m.mutex.Unlock()

	delete(m.v1CtrCgMgr, containerID)
}
// MoveConmonToCgroup takes the container ID, cgroup parent, conmon's cgroup (from the config) and conmon's PID
// It attempts to move conmon to the correct cgroup.
// cgroupPathToClean should always be returned empty. It is part of the interface to return the cgroup path
// that cri-o is responsible for cleaning up upon the container's death.
// Systemd takes care of this cleaning for us, so return an empty string.
func (m *SystemdManager) MoveConmonToCgroup(cid, cgroupParent, conmonCgroup string, pid int, resources *rspec.LinuxResources) (cgroupPathToClean string, _ error) {
	// A conmon cgroup configured as a slice overrides the sandbox parent.
	if strings.HasSuffix(conmonCgroup, ".slice") {
		cgroupParent = conmonCgroup
	}
	conmonUnitName := fmt.Sprintf("crio-conmon-%s.scope", cid)
	// Set the systemd KillSignal to SIGPIPE that conmon ignores.
	// This helps during node shutdown so that conmon waits for the container
	// to exit and doesn't forward the SIGTERM that it gets.
	props := []systemdDbus.Property{
		{
			Name: "KillSignal",
			Value: dbus.MakeVariant(int(unix.SIGPIPE)),
		},
		// Order the scope after crio.service.
		systemdDbus.PropAfter("crio.service"),
	}
	// Propagate the workload's CPU settings (cpuset, shares, quota,
	// period) onto the conmon scope when they are provided.
	if resources != nil && resources.CPU != nil {
		if resources.CPU.Cpus != "" {
			if !node.SystemdHasAllowedCPUs() {
				// Older systemd: skip the cpuset rather than failing.
				logrus.Errorf("Systemd does not support AllowedCPUs; skipping setting for workload")
			} else {
				// systemd expects the cpuset as a bit mask, not a range string.
				bits, err := systemd.RangeToBits(resources.CPU.Cpus)
				if err != nil {
					return "", fmt.Errorf("cpuset conversion error: %w", err)
				}
				props = append(props, systemdDbus.Property{
					Name: "AllowedCPUs",
					Value: dbus.MakeVariant(bits),
				})
			}
		}
		if resources.CPU.Shares != nil {
			props = append(props, systemdDbus.Property{
				Name: "CPUShares",
				Value: dbus.MakeVariant(resources.CPU.Shares),
			})
		}
		if resources.CPU.Quota != nil {
			props = append(props, systemdDbus.Property{
				Name: "CPUQuota",
				Value: dbus.MakeVariant(resources.CPU.Quota),
			})
		}
		if resources.CPU.Period != nil {
			props = append(props, systemdDbus.Property{
				Name: "CPUQuotaPeriodSec",
				Value: dbus.MakeVariant(resources.CPU.Period),
			})
		}
	}
	logrus.Debugf("Running conmon under slice %s and unitName %s", cgroupParent, conmonUnitName)
	// Ask systemd (via dbus) to place pid into a transient scope with the
	// properties assembled above.
	if err := utils.RunUnderSystemdScope(m.dbusMgr, pid, cgroupParent, conmonUnitName, props...); err != nil {
		return "", fmt.Errorf("failed to add conmon to systemd sandbox cgroup: %w", err)
	}
	// return empty string as path because cgroup cleanup is done by systemd
	return "", nil
}
// SandboxCgroupPath takes the sandbox parent, sandbox ID, and container minimum memory.
// It returns the cgroup parent, cgroup path, and error.
// It also checks if enough memory is available in the given cgroup.
func (m *SystemdManager) SandboxCgroupPath(sbParent, sbID string, containerMinMemory int64) (cgParent, cgPath string, _ error) {
	if sbParent == "" {
		return "", "", nil
	}

	// systemd requires the parent to be a slice unit.
	if !strings.HasSuffix(filepath.Base(sbParent), ".slice") {
		return "", "", fmt.Errorf("cri-o configured with systemd cgroup manager, but did not receive slice as parent: %s", sbParent)
	}

	cgParent = convertCgroupFsNameToSystemd(sbParent)

	if err := verifyCgroupHasEnoughMemory(sbParent, m.memoryPath, m.memoryMaxFile, containerMinMemory); err != nil {
		return "", "", err
	}

	cgPath = strings.Join([]string{cgParent, CrioPrefix, sbID}, ":")

	return cgParent, cgPath, nil
}
// SandboxCgroupManager takes the cgroup parent, and sandbox ID.
// It returns the raw libcontainer cgroup manager for that sandbox.
// On cgroup v1 the manager is cached per sandbox ID.
func (m *SystemdManager) SandboxCgroupManager(sbParent, sbID string) (cgroups.Manager, error) {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	if !node.CgroupIsV2() {
		if cached, ok := m.v1SbCgMgr[sbID]; ok {
			return cached, nil
		}
	}

	_, slicePath, err := sandboxCgroupAbsolutePath(sbParent)
	if err != nil {
		return nil, err
	}

	mgr, err := libctrManager(filepath.Base(slicePath), filepath.Dir(slicePath), true)
	if err != nil {
		return nil, err
	}

	if !node.CgroupIsV2() {
		// cache only cgroup v1 managers
		m.v1SbCgMgr[sbID] = mgr
	}

	return mgr, nil
}
// SandboxCgroupStats takes the sandbox parent, and sandbox ID.
// It creates a new cgroup for that sandbox if it does not already exist.
// It returns the cgroup stats for that sandbox.
func (m *SystemdManager) SandboxCgroupStats(sbParent, sbID string) (*CgroupStats, error) {
	mgr, err := m.SandboxCgroupManager(sbParent, sbID)
	if err != nil {
		return nil, err
	}

	return statsFromLibctrMgr(mgr)
}
// RemoveSandboxCgManager removes cgroup manager for the sandbox.
// Only needed on cgroup v1, where managers are cached.
func (m *SystemdManager) RemoveSandboxCgManager(sbID string) {
	if node.CgroupIsV2() {
		return
	}

	m.mutex.Lock()
	defer m.mutex.Unlock()

	delete(m.v1SbCgMgr, sbID)
}
// sandboxCgroupAbsolutePath converts the sandbox parent into its systemd
// name and expands it to the absolute slice path on disk.
//nolint:unparam // golangci-lint claims cgParent is unused, though it's being used to include documentation inline.
func sandboxCgroupAbsolutePath(sbParent string) (cgParent, slicePath string, err error) {
	cgParent = convertCgroupFsNameToSystemd(sbParent)

	expanded, expandErr := systemd.ExpandSlice(cgParent)
	if expandErr != nil {
		return "", "", fmt.Errorf("expanding systemd slice path for %q: %w", cgParent, expandErr)
	}

	return cgParent, expanded, nil
}
// convertCgroupFsNameToSystemd converts an expanded cgroupfs name to its systemd name.
// For example, it will convert test.slice/test-a.slice/test-a-b.slice to become test-a-b.slice.
func convertCgroupFsNameToSystemd(cgroupfsName string) string {
	// TODO: see if libcontainer systemd implementation could use something similar, and if so, move
	// this function up to that library. At that time, it would most likely do validation specific to systemd
	// above and beyond the simple assumption here that the base of the path encodes the hierarchy
	// per systemd convention.
	systemdName := path.Base(cgroupfsName)

	return systemdName
}
// CreateSandboxCgroup calls the helper function createSandboxCgroup for this manager.
// Note: createSandboxCgroup will create a cgroupfs cgroup for the infra container underneath the pod slice.
// It will not use dbus to create this cgroup, but instead call libcontainer's cgroupfs manager directly.
// This is because a scope created here will not have a process within it (as it's usually for a dropped infra container),
// and a slice cannot have the required `crio` prefix (while still being within the pod slice).
// Ultimately, this cgroup is required for cAdvisor to be able to register the pod and collect network metrics for it.
// This work will not be relevant when CRI-O is responsible for gathering pod metrics (KEP-2371), but is required until that's done.
func (m *SystemdManager) CreateSandboxCgroup(sbParent, containerID string) error {
	// sbParent should always be specified by kubelet, but sometimes not by critest/crictl.
	// Skip creation in this case.
	if sbParent == "" {
		logrus.Infof("Not creating sandbox cgroup: sbParent is empty")

		return nil
	}

	expanded, err := systemd.ExpandSlice(sbParent)
	if err != nil {
		return err
	}

	return createSandboxCgroup(expanded, containerCgroupPath(containerID))
}
// RemoveSandboxCgroup calls the helper function removeSandboxCgroup for this manager.
//
// Fix: the skip-path comment and log message were copy-pasted from
// CreateSandboxCgroup and claimed the cgroup was "not created" when this
// function removes it.
func (m *SystemdManager) RemoveSandboxCgroup(sbParent, containerID string) error {
	// sbParent should always be specified by kubelet, but sometimes not by critest/crictl.
	// Skip removal in this case.
	if sbParent == "" {
		logrus.Infof("Not removing sandbox cgroup: sbParent is empty")

		return nil
	}

	expandedParent, err := systemd.ExpandSlice(sbParent)
	if err != nil {
		return err
	}

	return removeSandboxCgroup(expandedParent, containerCgroupPath(containerID))
}
package cnimgr
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/wait"
)
// PodNetworkLister returns the set of pod networks that are still valid;
// it is used by GC to decide which network resources to keep.
type PodNetworkLister func() ([]*ocicni.PodNetwork, error)

// CNIManager wraps the OCI CNI plugin and tracks its readiness state.
type CNIManager struct {
	// cniPlugin is the internal OCI CNI plugin
	plugin ocicni.CNIPlugin
	// lastError is the most recent plugin status error; nil once ready.
	lastError error
	// watchers receive true when the plugin becomes ready, false on shutdown.
	watchers []chan bool
	// shutdown is set by Shutdown to stop the readiness poll.
	shutdown bool
	// mutex guards the fields above.
	mutex sync.RWMutex
	// validPodList is set via GC and consulted by doGC.
	validPodList PodNetworkLister
}
// New creates a CNIManager for the given default network and plugin
// directories and starts a background poll that waits for the plugin to
// report ready.
func New(defaultNetwork, networkDir string, pluginDirs ...string) (*CNIManager, error) {
	// Init CNI plugin
	plugin, err := ocicni.InitCNI(defaultNetwork, networkDir, pluginDirs...)
	if err != nil {
		return nil, fmt.Errorf("initialize CNI plugin: %w", err)
	}

	mgr := &CNIManager{
		plugin:    plugin,
		lastError: errors.New("plugin status uninitialized"),
	}

	go mgr.pollUntilReady()

	return mgr, nil
}
// pollUntilReady polls the plugin status every 500ms until pollFunc
// reports done (plugin ready or manager shut down).
func (c *CNIManager) pollUntilReady() {
	//nolint:errcheck,staticcheck
	_ = wait.PollInfinite(500*time.Millisecond, c.pollFunc)
}
// pollFunc is a single poll iteration. It returns done=true when the
// manager is shut down or the plugin reports ready; it never returns an
// error so that polling continues on transient failures. On readiness it
// runs any deferred GC, clears lastError, and notifies all watchers.
func (c *CNIManager) pollFunc() (bool, error) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	if c.shutdown {
		return true, nil
	}
	if err := c.plugin.Status(); err != nil {
		// Not ready yet: remember the error for ReadyOrError and retry.
		c.lastError = err
		return false, nil
	}
	// on startup, GC might have been attempted before the plugin was actually
	// ready so we might have deferred it until now, which is still a good time
	// to do it as the relevant context is equivalent: the same list of pods is
	// valid and stable because new pods can't be created until the plugin is
	// announced as ready
	if err := c.doGC(context.Background()); err != nil {
		logrus.Warnf("Garbage collect stale network resources during plugin startup failed: %v", err)
	}
	c.lastError = nil
	// Each watcher channel is buffered (see AddWatcher), so these sends
	// do not block.
	for _, watcher := range c.watchers {
		watcher <- true
	}
	return true, nil
}
// ReadyOrError returns nil if the plugin is ready,
// or the last error that was received in checking.
func (c *CNIManager) ReadyOrError() error {
	c.mutex.RLock()
	defer c.mutex.RUnlock()
	return c.lastError
}
// Plugin returns the CNI plugin.
// NOTE(review): read without holding mutex — presumably safe because the
// plugin is only replaced via the test-only SetCNIPlugin; confirm.
func (c *CNIManager) Plugin() ocicni.CNIPlugin {
	return c.plugin
}
// AddWatcher creates a new watcher for the CNI manager
// said watcher will send a `true` value if the CNI plugin was successfully ready
// or `false` if the server shutdown first.
func (c *CNIManager) AddWatcher() chan bool {
	// Buffered so pollFunc/Shutdown never block on notification.
	watcher := make(chan bool, 1)

	c.mutex.Lock()
	defer c.mutex.Unlock()

	c.watchers = append(c.watchers, watcher)

	return watcher
}
// Shutdown shuts down the CNI manager, and notifies the watcher
// that the CNI manager is not ready.
func (c *CNIManager) Shutdown() {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	c.shutdown = true

	// Tell every watcher the server stopped before the plugin became ready.
	for _, w := range c.watchers {
		w <- false
	}
}
// GC calls the plugin's GC to clean up any resources concerned with stale pods
// (pod other than the ones provided by validPodList). The call to the plugin
// will be deferred until it is ready logging any errors then and returning nil
// error here.
func (c *CNIManager) GC(ctx context.Context, validPodList PodNetworkLister) error {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	c.validPodList = validPodList

	if c.lastError == nil {
		return c.doGC(ctx)
	}

	// on startup, GC might be attempted before the plugin is actually ready
	// so defer until it is (see pollFunc)
	return nil
}
// doGC invokes the plugin's GC with the currently-registered valid pod
// list, bounding the call to 30 seconds. A nil validPodList is a no-op.
func (c *CNIManager) doGC(ctx context.Context) error {
	if c.validPodList == nil {
		return nil
	}

	validPods, err := c.validPodList()
	if err != nil {
		return err
	}

	// give a GC call 30s
	gcCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	return c.plugin.GC(gcCtx, validPods)
}
// All *_inject.go files are meant to be used by tests only. Purpose of this
// files is to provide a way to inject mocked data into the current setup.
package cnimgr
import (
"github.com/cri-o/ocicni/pkg/ocicni"
)
// SetCNIPlugin sets the network plugin for the Configuration. The function
// errors if a sane shutdown of the initially created network plugin failed.
// Test-only helper (lives in an *_inject.go file).
func (c *CNIManager) SetCNIPlugin(plugin ocicni.CNIPlugin) error {
	if c.plugin != nil {
		if err := c.plugin.Shutdown(); err != nil {
			return err
		}
	}
	c.plugin = plugin
	// initialize the poll, but don't run it continuously (or else the mocks will get weird)
	//nolint:errcheck
	_, _ = c.pollFunc()
	return nil
}
package conmonmgr
import (
"bytes"
"fmt"
"path"
"strings"
"github.com/blang/semver/v4"
"github.com/sirupsen/logrus"
"github.com/cri-o/cri-o/utils/cmdrunner"
)
var (
	// versionSupportsSync is the first conmon release with the --sync flag.
	versionSupportsSync = semver.MustParse("2.0.19")
	// versionSupportsLogGlobalSizeMax is the first conmon release with
	// the --log-global-size-max flag.
	versionSupportsLogGlobalSizeMax = semver.MustParse("2.1.2")
)
// ConmonManager probes a conmon binary once and caches its version and
// feature support.
type ConmonManager struct {
	// conmonVersion is the parsed semver of the probed binary.
	conmonVersion *semver.Version
	// supportsSync records whether conmon accepts --sync.
	supportsSync bool
	// supportsLogGlobalSizeMax records whether conmon accepts --log-global-size-max.
	supportsLogGlobalSizeMax bool
}
// New builds a ConmonManager for the conmon binary at conmonPath by
// probing its version and feature support.
// this function is heavily based on github.com/containers/common#probeConmon.
func New(conmonPath string) (*ConmonManager, error) {
	if !path.IsAbs(conmonPath) {
		return nil, fmt.Errorf("conmon path is not absolute: %s", conmonPath)
	}

	out, err := cmdrunner.CombinedOutput(conmonPath, "--version")
	if err != nil {
		return nil, fmt.Errorf("get conmon version: %w", err)
	}

	fields := strings.Fields(string(out))
	if len(fields) < 3 {
		return nil, fmt.Errorf("conmon version output too short: expected three fields, got %d in %s", len(fields), out)
	}

	mgr := &ConmonManager{}
	if err := mgr.parseConmonVersion(fields[2]); err != nil {
		return nil, fmt.Errorf("parse conmon version: %w", err)
	}

	mgr.initializeSupportsSync()
	mgr.initializeSupportsLogGlobalSizeMax(conmonPath)

	return mgr, nil
}
// parseConmonVersion parses versionString as semver and stores the result.
func (c *ConmonManager) parseConmonVersion(versionString string) error {
	v, err := semver.New(versionString)
	if err != nil {
		return err
	}

	c.conmonVersion = v

	return nil
}
// initializeSupportsLogGlobalSizeMax determines --log-global-size-max
// support from the version, falling back to scanning --help output.
func (c *ConmonManager) initializeSupportsLogGlobalSizeMax(conmonPath string) {
	supported := c.conmonVersion.GTE(versionSupportsLogGlobalSizeMax)
	if !supported {
		// Read help output as a fallback in case the feature was backported to conmon,
		// but the version wasn't bumped.
		helpOutput, err := cmdrunner.CombinedOutput(conmonPath, "--help")
		supported = err == nil && bytes.Contains(helpOutput, []byte("--log-global-size-max"))
	}

	c.supportsLogGlobalSizeMax = supported

	verb := "does not"
	if supported {
		verb = "does"
	}

	logrus.Infof("Conmon %s support the --log-global-size-max option", verb)
}
// SupportsLogGlobalSizeMax reports whether the probed conmon supports
// the --log-global-size-max option.
func (c *ConmonManager) SupportsLogGlobalSizeMax() bool {
	return c.supportsLogGlobalSizeMax
}
// initializeSupportsSync determines --sync support from the conmon version.
func (c *ConmonManager) initializeSupportsSync() {
	c.supportsSync = c.conmonVersion.GTE(versionSupportsSync)

	verb := "does"
	if !c.supportsSync {
		verb = "does not"
	}

	logrus.Infof("Conmon %s support the --sync option", verb)
}
// SupportsSync reports whether the probed conmon supports the --sync option.
func (c *ConmonManager) SupportsSync() bool {
	return c.supportsSync
}
package device
import (
"fmt"
"strings"
"github.com/opencontainers/runc/libcontainer/devices"
rspec "github.com/opencontainers/runtime-spec/specs-go"
)
// DeviceAnnotationDelim is the character
// used to separate devices in the annotation
// `io.kubernetes.cri-o.Devices`.
// See DevicesFromAnnotation for the per-device format.
const DeviceAnnotationDelim = ","
// Config is the internal device configuration
// it holds onto the contents of the additional_devices
// field, allowing admins to configure devices that are given
// to all containers.
type Config struct {
	// devices is the parsed additional-device list, populated by LoadDevices.
	devices []Device
}
// Device holds the runtime spec
// fields needed for a device.
type Device struct {
	// Device is the OCI runtime-spec device entry (path, type, major/minor, ...).
	Device rspec.LinuxDevice
	// Resource is the matching device-cgroup allow rule.
	Resource rspec.LinuxDeviceCgroup
}
// New creates a new device Config with an empty device list.
func New() *Config {
	cfg := Config{
		devices: make([]Device, 0),
	}

	return &cfg
}
// LoadDevices takes a slice of strings of additional_devices
// specified in the config.
// It saves the resulting Device structs, so they are
// processed once and used later.
func (d *Config) LoadDevices(devsFromConfig []string) error {
	parsed, err := devicesFromStrings(devsFromConfig, nil)
	if err != nil {
		return err
	}

	d.devices = parsed

	return nil
}
// DevicesFromAnnotation takes an annotation string of the form
// io.kubernetes.cri-o.Device=$PATH:$PATH:$MODE,$PATH...
// and returns a Device object that can be passed to a create config.
func DevicesFromAnnotation(annotation string, allowedDevices []string) ([]Device, error) {
	allowed := make(map[string]struct{}, len(allowedDevices))
	for _, dev := range allowedDevices {
		allowed[dev] = struct{}{}
	}

	return devicesFromStrings(strings.Split(annotation, DeviceAnnotationDelim), allowed)
}
// devicesFromStrings takes a slice of strings in the form $PATH{:$PATH}{:$MODE}
// Where the first path is the path to the device on the host
// The second is where the device will be put in the container (optional)
// and the third is the mode the device will be mounted with (optional)
// It returns a slice of Device structs, ready to be saved or given to a container
// runtime spec generator.
// If allowedDevices is non-nil, every source path must be present in it.
//
// Fix: a destination outside /dev previously produced the misleading
// error "invalid device mode"; it now reports the bad destination.
func devicesFromStrings(devsFromConfig []string, allowedDevices map[string]struct{}) ([]Device, error) {
	linuxdevs := make([]Device, 0, len(devsFromConfig))

	for _, d := range devsFromConfig {
		// ignore empty entries
		if d == "" {
			continue
		}

		src, dst, permissions, err := parseDevice(d)
		if err != nil {
			return nil, err
		}

		if allowedDevices != nil {
			if _, ok := allowedDevices[src]; !ok {
				return nil, fmt.Errorf("device %s is specified but is not in allowed_devices", src)
			}
		}
		// parseDevice does not check the destination is in /dev,
		// but it should be checked
		if !strings.HasPrefix(dst, "/dev/") {
			return nil, fmt.Errorf("invalid device destination, must be under /dev: %s", dst)
		}

		// Resolve type, major/minor, mode, and ownership from the host node.
		dev, err := devices.DeviceFromPath(src, permissions)
		if err != nil {
			return nil, fmt.Errorf("%s is not a valid device: %w", src, err)
		}

		dev.Path = dst

		linuxdevs = append(linuxdevs,
			Device{
				Device: rspec.LinuxDevice{
					Path:     dev.Path,
					Type:     string(dev.Type),
					Major:    dev.Major,
					Minor:    dev.Minor,
					FileMode: &dev.FileMode,
					UID:      &dev.Uid,
					GID:      &dev.Gid,
				},
				Resource: rspec.LinuxDeviceCgroup{
					Allow:  true,
					Type:   string(dev.Type),
					Major:  &dev.Major,
					Minor:  &dev.Minor,
					Access: permissions,
				},
			})
	}

	return linuxdevs, nil
}
// Devices returns the devices saved in the Config.
// Note: the internal slice is returned without copying.
func (d *Config) Devices() []Device {
	return d.devices
}
// parseDevice parses a device mapping string of the form
// $PATH{:$PATH}{:$MODE} into its host source path, container destination
// path, and permission string. Destination defaults to the source and
// permissions default to "rwm".
func parseDevice(device string) (src, dst, permissions string, err error) {
	permissions = "rwm"
	parts := strings.Split(device, ":")

	switch len(parts) {
	case 3:
		if !isValidDeviceMode(parts[2]) {
			return "", "", "", fmt.Errorf("invalid device mode: %s", parts[2])
		}

		permissions = parts[2]

		fallthrough
	case 2:
		// The second field may be either a mode or a destination path.
		if isValidDeviceMode(parts[1]) {
			permissions = parts[1]
		} else if parts[1] != "" && parts[1][0] != '/' {
			return "", "", "", fmt.Errorf("invalid device mode: %s", parts[1])
		} else {
			dst = parts[1]
		}

		fallthrough
	case 1:
		src = parts[0]
	default:
		return "", "", "", fmt.Errorf("invalid device specification: %s", device)
	}

	if dst == "" {
		dst = src
	}

	return src, dst, permissions, nil
}
// isValidDeviceMode checks if the mode for device is valid or not.
// A valid mode is a non-empty combination of r (read), w (write), and
// m (mknod) with no character repeated.
func isValidDeviceMode(mode string) bool {
	if mode == "" {
		return false
	}

	seen := make(map[rune]bool, 3)

	for _, c := range mode {
		switch c {
		case 'r', 'w', 'm':
			if seen[c] {
				// duplicate flag
				return false
			}

			seen[c] = true
		default:
			return false
		}
	}

	return true
}
//go:build linux
package node
import (
"errors"
"os"
"path/filepath"
"slices"
"sync"
"github.com/containers/common/pkg/cgroups"
libctrcgroups "github.com/opencontainers/cgroups"
)
var (
	// cgroupHasMemorySwapOnce guards the one-time memory-swap probe below.
	cgroupHasMemorySwapOnce sync.Once
	cgroupHasMemorySwap     bool
	cgroupHasMemorySwapErr  error

	// cgroupControllerOnce guards the one-time pids/hugetlb controller probe.
	cgroupControllerOnce sync.Once
	cgroupControllerErr  error
	cgroupHasHugetlb     bool
	cgroupHasPid         bool

	// cgroupIsV2Err holds the last error from the cgroup-mode detection,
	// consumed by ValidateConfig.
	cgroupIsV2Err error
)
// CgroupIsV2 returns whether the node is running in cgroup v2 (unified) mode.
// NOTE(review): the package-level cgroupIsV2Err is overwritten on every call
// without synchronization — presumably only consulted once via
// ValidateConfig at startup; confirm.
func CgroupIsV2() bool {
	var cgroupIsV2 bool
	cgroupIsV2, cgroupIsV2Err = cgroups.IsCgroup2UnifiedMode()
	return cgroupIsV2
}
// CgroupHasMemorySwap returns whether the memory swap controller is present.
// The probe runs once; the result (and any error, surfaced through
// ValidateConfig) is cached in package-level state.
func CgroupHasMemorySwap() bool {
	cgroupHasMemorySwapOnce.Do(func() {
		if CgroupIsV2() {
			// cgroup v2: swap accounting is available iff our own cgroup
			// exposes a memory.swap.current file.
			cg, err := libctrcgroups.ParseCgroupFile("/proc/self/cgroup")
			if err != nil {
				cgroupHasMemorySwapErr = err
				cgroupHasMemorySwap = false
				return
			}
			memSwap := filepath.Join("/sys/fs/cgroup", cg[""], "memory.swap.current")
			if _, err := os.Stat(memSwap); err != nil {
				// File absent: swap accounting disabled; not recorded as an error.
				cgroupHasMemorySwap = false
				return
			}
			cgroupHasMemorySwap = true
			return
		}
		// cgroup v1: probe the global memsw limit file.
		_, err := os.Stat("/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes")
		if err != nil {
			cgroupHasMemorySwapErr = errors.New("node not configured with memory swap")
			cgroupHasMemorySwap = false
			return
		}
		cgroupHasMemorySwap = true
	})
	return cgroupHasMemorySwap
}
// CgroupHasHugetlb returns whether the hugetlb controller is present.
// The underlying probe runs at most once (see checkRelevantControllers).
func CgroupHasHugetlb() bool {
	checkRelevantControllers()
	return cgroupHasHugetlb
}
// CgroupHasPid returns whether the pid controller is present.
// The underlying probe runs at most once (see checkRelevantControllers).
func CgroupHasPid() bool {
	checkRelevantControllers()
	return cgroupHasPid
}
// checkRelevantControllers probes once which of the controllers CRI-O
// cares about (pids, hugetlb) are available, recording results in the
// package-level flags and any probe error in cgroupControllerErr.
func checkRelevantControllers() {
	cgroupControllerOnce.Do(func() {
		wanted := map[string]*bool{
			"pids":    &cgroupHasPid,
			"hugetlb": &cgroupHasHugetlb,
		}

		ctrls, err := libctrcgroups.GetAllSubsystems()
		if err != nil {
			cgroupControllerErr = err

			return
		}

		for name, enabled := range wanted {
			if slices.Contains(ctrls, name) {
				*enabled = true
			}
		}
	})
}
//go:build linux
package node
import (
"fmt"
"github.com/sirupsen/logrus"
)
// ValidateConfig initializes and validates all of the singleton variables
// that store the node's configuration.
// Currently, we check hugetlb, cgroup v1 or v2, pid and memory swap support for cgroups.
// We check the error at server configuration validation, and if we error, shutdown
// cri-o early, instead of when we're already trying to run containers.
func ValidateConfig() error {
	cgroupIsV2 := CgroupIsV2()
	// Each entry runs a probe (init), then checks the package-level error
	// it may have recorded; fatal entries abort startup, non-fatal ones
	// only log a warning. activated (when non-nil) is logged for visibility.
	toInit := []struct {
		name string
		init func() bool
		err *error
		activated *bool
		fatal bool
	}{
		{
			name: "hugetlb cgroup",
			init: CgroupHasHugetlb,
			err: &cgroupControllerErr,
			activated: &cgroupHasHugetlb,
			fatal: true,
		},
		{
			name: "pid cgroup",
			init: CgroupHasPid,
			err: &cgroupControllerErr,
			activated: &cgroupHasPid,
			fatal: true,
		},
		{
			name: "memoryswap cgroup",
			init: CgroupHasMemorySwap,
			err: &cgroupHasMemorySwapErr,
			activated: &cgroupHasMemorySwap,
			fatal: false,
		},
		{
			name: "cgroup v2",
			init: CgroupIsV2,
			err: &cgroupIsV2Err,
			activated: &cgroupIsV2,
			fatal: false,
		},
		{
			name: "systemd AllowedCPUs",
			init: SystemdHasAllowedCPUs,
			err: &systemdHasAllowedCPUsErr,
			activated: &systemdHasAllowedCPUs,
			fatal: false,
		},
		{
			name: "fs.may_detach_mounts sysctl",
			init: checkFsMayDetachMounts,
			err: &checkFsMayDetachMountsErr,
			activated: nil,
			fatal: true,
		},
	}
	for _, i := range toInit {
		// The probe's boolean result is intentionally ignored; only the
		// recorded error matters here.
		i.init()
		if *i.err != nil {
			err := fmt.Errorf("node configuration validation for %s failed: %w", i.name, *i.err)
			if i.fatal {
				return err
			}
			logrus.Warn(err)
		}
		if i.activated != nil {
			logrus.Infof("Node configuration value for %s is %v", i.name, *i.activated)
		}
	}
	return nil
}
package node
import (
"fmt"
"os"
"strconv"
"strings"
"github.com/sirupsen/logrus"
)
// checkFsMayDetachMountsErr records a misconfigured fs.may_detach_mounts
// sysctl; it is surfaced (fatally) by ValidateConfig.
var checkFsMayDetachMountsErr error
// checkFsMayDetachMounts is called once from ValidateConfig(),
// and its return value is ignored. It makes cri-o fail in case
// checkFsMayDetachMountsErr is not nil.
func checkFsMayDetachMounts() bool {
	// this sysctl is specific to RHEL7 kernel
	const file = "/proc/sys/fs/may_detach_mounts"

	data, err := os.ReadFile(file)
	if err != nil {
		// A missing file means the kernel doesn't have this sysctl: fine.
		if !os.IsNotExist(err) {
			logrus.WithError(err).Debug("checkFsMayDetachMounts")
		}

		return true
	}

	value := strings.TrimSpace(string(data))

	parsed, err := strconv.ParseInt(value, 10, 64)
	if err != nil { // should never happen
		logrus.WithError(err).Warnf("CheckFsMayDetachMounts: file %s, value %q, can't convert to int", file, value)

		return true
	}

	if parsed == 1 {
		return true
	}

	checkFsMayDetachMountsErr = fmt.Errorf("fs.may_detach_mounts sysctl: expected 1, got %d; this may result in \"device or resource busy\" errors while stopping or removing containers", parsed)

	return false
}
//go:build linux
package node
import (
"fmt"
"sync"
"github.com/cri-o/cri-o/utils/cmdrunner"
)
var (
	// systemdHasAllowedCPUsOnce guards the one-time AllowedCPUs probe.
	systemdHasAllowedCPUsOnce sync.Once
	systemdHasAllowedCPUs     bool
	// systemdHasAllowedCPUsErr is surfaced by ValidateConfig.
	systemdHasAllowedCPUsErr error
)
// SystemdHasAllowedCPUs returns whether the node's systemd supports the
// AllowedCPUs property. The probe runs once; any error is recorded in
// systemdHasAllowedCPUsErr for ValidateConfig.
func SystemdHasAllowedCPUs() bool {
	systemdHasAllowedCPUsOnce.Do(func() {
		systemdHasAllowedCPUs, systemdHasAllowedCPUsErr = systemdSupportsProperty("AllowedCPUs")
	})
	return systemdHasAllowedCPUs
}
// systemdSupportsProperty checks whether systemd supports a property
// by asking systemctl to show it; an unknown property yields empty output.
// It returns an error if the systemctl invocation itself fails.
func systemdSupportsProperty(property string) (bool, error) {
	output, err := cmdrunner.Command("systemctl", "show", "-p", property, "systemd").Output()
	if err != nil {
		return false, fmt.Errorf("check systemd %s: %w", property, err)
	}

	return len(output) > 0, nil
}
package nri
import (
"slices"
"time"
nri "github.com/containerd/nri/pkg/adaptation"
validator "github.com/containerd/nri/plugins/default-validator"
"github.com/containerd/otelttrpc"
"github.com/containerd/ttrpc"
)
// Config represents the CRI-O NRI configuration.
type Config struct {
	// Enabled toggles NRI support.
	Enabled bool `toml:"enable_nri"`
	// SocketPath is where the NRI socket listens.
	SocketPath string `toml:"nri_listen"`
	// PluginPath is the directory for pre-launched plugins.
	PluginPath string `toml:"nri_plugin_dir"`
	// PluginConfigPath is the directory for plugin configuration.
	PluginConfigPath string `toml:"nri_plugin_config_dir"`
	// PluginRegistrationTimeout bounds plugin registration.
	PluginRegistrationTimeout time.Duration `toml:"nri_plugin_registration_timeout"`
	// PluginRequestTimeout bounds individual plugin requests.
	PluginRequestTimeout time.Duration `toml:"nri_plugin_request_timeout"`
	// DisableConnections disables externally-initiated plugin connections.
	DisableConnections bool `toml:"nri_disable_connections"`
	// withTracing enables ttrpc OTel tracing; set via WithTracing.
	withTracing bool
	// DefaultValidator configures the built-in adjustment validator.
	DefaultValidator *DefaultValidatorConfig `toml:"default_validator"`
}
// DefaultValidatorConfig configures the NRI default validator plugin,
// which can reject specific classes of container adjustments.
type DefaultValidatorConfig struct {
	// Enable turns the default validator on.
	Enable bool `toml:"nri_enable_default_validator"`
	RejectOCIHookAdjustment bool `toml:"nri_validator_reject_oci_hook_adjustment"`
	RejectRuntimeDefaultSeccompAdjustment bool `toml:"nri_validator_reject_runtime_default_seccomp_adjustment"`
	RejectUnconfinedSeccompAdjustment bool `toml:"nri_validator_reject_unconfined_seccomp_adjustment"`
	RejectCustomSeccompAdjustment bool `toml:"nri_validator_reject_custom_seccomp_adjustment"`
	RejectNamespaceAdjustment bool `toml:"nri_validator_reject_namespace_adjustment"`
	// RequiredPlugins lists plugins that must process every container.
	RequiredPlugins []string `toml:"nri_validator_required_plugins"`
	// TolerateMissingAnnotation names an annotation that waives RequiredPlugins.
	TolerateMissingAnnotation string `toml:"nri_validator_tolerate_missing_plugins_annotation"`
}
// New returns the default CRI-O NRI configuration: NRI enabled, upstream
// default socket/plugin paths and timeouts, and an all-zero default
// validator configuration.
func New() *Config {
	return &Config{
		Enabled: true,
		SocketPath: nri.DefaultSocketPath,
		PluginPath: nri.DefaultPluginPath,
		PluginConfigPath: nri.DefaultPluginConfigPath,
		PluginRegistrationTimeout: nri.DefaultPluginRegistrationTimeout,
		PluginRequestTimeout: nri.DefaultPluginRequestTimeout,
		DefaultValidator: &DefaultValidatorConfig{},
	}
}
// IsDefaultValidatorDefaultConfig reports whether the default-validator
// part of this configuration is identical to the built-in defaults.
func (c *Config) IsDefaultValidatorDefaultConfig() bool {
	defaults := New()

	return c.defaultValidatorEqual(defaults)
}
// defaultValidatorEqual compares the default-validator configuration of c
// against that of o. RequiredPlugins are compared order-insensitively.
func (c *Config) defaultValidatorEqual(o *Config) bool {
	cv, ov := c.DefaultValidator, o.DefaultValidator

	// Any differing scalar field means the configurations are not equal.
	switch {
	case cv.Enable != ov.Enable,
		cv.RejectOCIHookAdjustment != ov.RejectOCIHookAdjustment,
		cv.RejectRuntimeDefaultSeccompAdjustment != ov.RejectRuntimeDefaultSeccompAdjustment,
		cv.RejectUnconfinedSeccompAdjustment != ov.RejectUnconfinedSeccompAdjustment,
		cv.RejectCustomSeccompAdjustment != ov.RejectCustomSeccompAdjustment,
		cv.RejectNamespaceAdjustment != ov.RejectNamespaceAdjustment,
		cv.TolerateMissingAnnotation != ov.TolerateMissingAnnotation:
		return false
	}

	// slices.Equal already compares lengths, so the previous separate
	// length check was redundant.
	return slices.Equal(
		slices.Sorted(slices.Values(cv.RequiredPlugins)),
		slices.Sorted(slices.Values(ov.RequiredPlugins)))
}
// Validate loads and validates the effective runtime NRI configuration.
// It is currently a no-op that always succeeds; onExecution is unused.
func (c *Config) Validate(onExecution bool) error {
	return nil
}
// WithTracing enables or disables OpenTelemetry ttrpc instrumentation for
// NRI connections. It tolerates a nil receiver and returns c for chaining.
func (c *Config) WithTracing(enable bool) *Config {
	if c == nil {
		return nil
	}

	c.withTracing = enable

	return c
}
// ToOptions returns NRI options for this configuration.
// A nil receiver yields an empty option set.
func (c *Config) ToOptions() []nri.Option {
	opts := []nri.Option{}

	// Fix: the original guarded every field access with `c != nil` except
	// c.withTracing, which would panic on a nil receiver. Guard once here.
	if c == nil {
		return opts
	}

	if c.SocketPath != "" {
		opts = append(opts, nri.WithSocketPath(c.SocketPath))
	}

	if c.PluginPath != "" {
		opts = append(opts, nri.WithPluginPath(c.PluginPath))
	}

	if c.PluginConfigPath != "" {
		opts = append(opts, nri.WithPluginConfigPath(c.PluginConfigPath))
	}

	if c.DisableConnections {
		opts = append(opts, nri.WithDisabledExternalConnections())
	}

	if c.DefaultValidator != nil {
		opts = append(opts, nri.WithDefaultValidator(c.DefaultValidator.ToNRI()))
	}

	if c.withTracing {
		// Instrument both client and server sides of the ttrpc transport.
		opts = append(opts,
			nri.WithTTRPCOptions(
				[]ttrpc.ClientOpts{
					ttrpc.WithUnaryClientInterceptor(
						otelttrpc.UnaryClientInterceptor(),
					),
				},
				[]ttrpc.ServerOpt{
					ttrpc.WithUnaryServerInterceptor(
						otelttrpc.UnaryServerInterceptor(),
					),
				},
			),
		)
	}

	return opts
}
// ConfigureTimeouts applies any non-zero plugin timeouts from the
// configuration to the NRI runtime; zero values keep NRI's defaults.
func (c *Config) ConfigureTimeouts() {
	if t := c.PluginRegistrationTimeout; t != 0 {
		nri.SetPluginRegistrationTimeout(t)
	}

	if t := c.PluginRequestTimeout; t != 0 {
		nri.SetPluginRequestTimeout(t)
	}
}
// ToNRI converts the CRI-O validator configuration into the corresponding
// NRI default-validator configuration. A nil receiver converts to nil.
func (c *DefaultValidatorConfig) ToNRI() *validator.DefaultValidatorConfig {
	if c == nil {
		return nil
	}

	out := &validator.DefaultValidatorConfig{}
	out.Enable = c.Enable
	out.RejectOCIHookAdjustment = c.RejectOCIHookAdjustment
	out.RejectRuntimeDefaultSeccompAdjustment = c.RejectRuntimeDefaultSeccompAdjustment
	out.RejectUnconfinedSeccompAdjustment = c.RejectUnconfinedSeccompAdjustment
	out.RejectCustomSeccompAdjustment = c.RejectCustomSeccompAdjustment
	out.RejectNamespaceAdjustment = c.RejectNamespaceAdjustment
	out.RequiredPlugins = c.RequiredPlugins
	out.TolerateMissingAnnotation = c.TolerateMissingAnnotation

	return out
}
package nsmgr
import (
"bytes"
"errors"
"fmt"
"os"
"path/filepath"
"syscall"
nspkg "github.com/containernetworking/plugins/pkg/ns"
"github.com/containers/storage/pkg/idtools"
"github.com/google/uuid"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
"github.com/cri-o/cri-o/utils"
"github.com/cri-o/cri-o/utils/cmdrunner"
)
// NamespaceManager manages the server's namespaces.
// Specifically, it is an interface for how the server is creating namespaces,
// and can be requested to create namespaces for a pod.
type NamespaceManager struct {
	namespacesDir string // base directory under which namespaces are pinned
	pinnsPath     string // path to the pinns binary used to create/pin namespaces
}
// New creates a new NamespaceManager for the given pin directory and
// pinns binary location. No filesystem state is touched until Initialize.
func New(namespacesDir, pinnsPath string) *NamespaceManager {
	mgr := &NamespaceManager{}
	mgr.namespacesDir = namespacesDir
	mgr.pinnsPath = pinnsPath

	return mgr
}
// Initialize ensures the namespaces directory plus one sub-directory per
// pinnable namespace type exist, replacing any plain file in the way.
func (mgr *NamespaceManager) Initialize() error {
	if err := os.MkdirAll(mgr.namespacesDir, 0o755); err != nil {
		return fmt.Errorf("invalid namespaces_dir: %w", err)
	}

	for _, ns := range supportedNamespacesForPinning() {
		nsDir := mgr.dirForType(ns)
		if err := utils.IsDirectory(nsDir); err != nil {
			// The file is not a directory, but exists.
			// We should remove it.
			if errors.Is(err, syscall.ENOTDIR) {
				if err := os.Remove(nsDir); err != nil {
					return fmt.Errorf("remove file to create namespaces sub-dir: %w", err)
				}

				logrus.Infof("Removed file %s to create directory in that path.", nsDir)
			} else if !errors.Is(err, os.ErrNotExist) {
				// Neither "exists but is not a directory" nor "does
				// not exist": some other disk error we cannot fix here.
				// (errors.Is replaces the legacy os.IsNotExist check.)
				return fmt.Errorf("checking whether namespaces sub-dir exists: %w", err)
			}

			if err := os.MkdirAll(nsDir, 0o755); err != nil {
				return fmt.Errorf("invalid namespaces sub-dir: %w", err)
			}
		}
	}

	return nil
}
// NewPodNamespaces creates new namespaces for a pod.
// It's responsible for running pinns and creating the Namespace objects.
// The caller is responsible for cleaning up the namespaces by calling Namespace.Remove().
func (mgr *NamespaceManager) NewPodNamespaces(cfg *PodNamespacesConfig) ([]Namespace, error) {
	if cfg == nil {
		return nil, errors.New("PodNamespacesConfig cannot be nil")
	}

	if len(cfg.Namespaces) == 0 {
		return []Namespace{}, nil
	}

	// Map each namespace type to its pinns CLI flag.
	typeToArg := map[NSType]string{
		IPCNS:  "--ipc",
		UTSNS:  "--uts",
		USERNS: "--user",
		NETNS:  "--net",
	}

	// All namespaces of this pod share a single random pin file name,
	// placed under a per-type sub-directory.
	pinnedNamespace := uuid.New().String()
	pinnsArgs := []string{
		"-d", mgr.namespacesDir,
		"-f", pinnedNamespace,
	}

	// Forward requested sysctls to pinns ("-s key=value").
	for key, value := range cfg.Sysctls {
		pinnsArgs = append(pinnsArgs, "-s", fmt.Sprintf("%s=%s", key, value))
	}

	var rootPair idtools.IDPair
	if cfg.IDMappings != nil {
		rootPair = cfg.IDMappings.RootPair()
	}

	for _, ns := range cfg.Namespaces {
		arg, ok := typeToArg[ns.Type]
		if !ok {
			return nil, fmt.Errorf("invalid namespace type: %s", ns.Type)
		}

		// "=host" asks pinns to pin the host namespace instead of
		// creating a fresh one.
		if ns.Host {
			arg += "=host"
		}

		pinnsArgs = append(pinnsArgs, arg)
		ns.Path = filepath.Join(mgr.namespacesDir, string(ns.Type)+"ns", pinnedNamespace)

		// With user namespaces the pin file must be owned by the mapped
		// root user, so pre-create and chown it before pinns runs.
		if cfg.IDMappings != nil {
			if err := chownDirToIDPair(ns.Path, rootPair); err != nil {
				return nil, err
			}
		}
	}

	if cfg.IDMappings != nil {
		pinnsArgs = append(pinnsArgs,
			"--uid-mapping="+getMappingsForPinns(cfg.IDMappings.UIDs()),
			"--gid-mapping="+getMappingsForPinns(cfg.IDMappings.GIDs()))
	}

	logrus.Debugf("Calling pinns with %v", pinnsArgs)

	output, err := cmdrunner.Command(mgr.pinnsPath, pinnsArgs...).CombinedOutput()
	if err != nil {
		logrus.Warnf("Pinns %v failed: %s (%v)", pinnsArgs, string(output), err)
		// cleanup the mounts; EINVAL means "not mounted", which is fine here.
		for _, ns := range cfg.Namespaces {
			if mErr := unix.Unmount(ns.Path, unix.MNT_DETACH); mErr != nil && !errors.Is(mErr, unix.EINVAL) {
				logrus.Warnf("Failed to unmount %s: %v", ns.Path, mErr)
			}
		}

		return nil, fmt.Errorf("failed to pin namespaces %v: %s %w", cfg.Namespaces, output, err)
	}

	logrus.Debugf("Got output from pinns: %s", output)

	returnedNamespaces := make([]Namespace, 0, len(cfg.Namespaces))

	for _, ns := range cfg.Namespaces {
		// Shadow the config entry with the constructed Namespace object.
		ns, err := GetNamespace(ns.Path, ns.Type)
		if err != nil {
			// Roll back namespaces already created to avoid leaking pins.
			for _, nsToClose := range returnedNamespaces {
				if err2 := nsToClose.Remove(); err2 != nil {
					logrus.Errorf("Failed to remove namespace after failed to create: %v", err2)
				}
			}

			return nil, err
		}

		returnedNamespaces = append(returnedNamespaces, ns)
	}

	return returnedNamespaces, nil
}
// chownDirToIDPair creates the pin file at pinPath (and its parent
// directory) and hands its ownership to the given root ID pair.
func chownDirToIDPair(pinPath string, rootPair idtools.IDPair) error {
	if err := os.MkdirAll(filepath.Dir(pinPath), 0o755); err != nil {
		return err
	}

	f, err := os.Create(pinPath)
	if err != nil {
		return err
	}
	// Nothing was written, so the Close error can safely be ignored.
	f.Close()

	return os.Chown(pinPath, rootPair.UID, rootPair.GID)
}
// getMappingsForPinns serializes ID mappings into the concatenated
// "containerID-hostID-size@" format expected by pinns.
func getMappingsForPinns(mappings []idtools.IDMap) string {
	var buf bytes.Buffer
	for _, m := range mappings {
		fmt.Fprintf(&buf, "%d-%d-%d@", m.ContainerID, m.HostID, m.Size)
	}

	return buf.String()
}
// NamespaceFromProcEntry creates a new namespace object from a bind mount from a processes proc entry.
// The caller is responsible for cleaning up the namespace by calling Namespace.Remove().
// This function is heavily based on containernetworking ns package found at:
// https://github.com/containernetworking/plugins/blob/5c3c17164270150467498a32c71436c7cd5501be/pkg/ns/ns.go#L140
// Credit goes to the CNI authors.
func (mgr *NamespaceManager) NamespaceFromProcEntry(pid int, nsType NSType) (_ Namespace, retErr error) {
	// now create an empty file to serve as the bind-mount target.
	// NOTE(review): the pin file is always created under the PIDNS
	// directory regardless of nsType — confirm this helper is only
	// used for PID namespaces.
	f, err := os.CreateTemp(mgr.dirForType(PIDNS), string(PIDNS))
	if err != nil {
		return nil, fmt.Errorf("creating namespace path: %w", err)
	}

	pinnedNamespace := f.Name()
	f.Close()

	// Remove the pin file again if anything below fails.
	defer func() {
		if retErr != nil {
			if err := os.Remove(pinnedNamespace); err != nil {
				logrus.Errorf("Failed to remove namespace after failure to pin namespace: %v", err)
			}
		}
	}()

	podPidnsProc := NamespacePathFromProc(nsType, pid)
	// pid must have stopped or be incorrect, report error
	if podPidnsProc == "" {
		return nil, fmt.Errorf("proc entry for pid %d is gone; pid not created or stopped", pid)
	}

	// bind mount the new ns from the proc entry onto the mount point
	if err := unix.Mount(podPidnsProc, pinnedNamespace, "none", unix.MS_BIND, ""); err != nil {
		return nil, fmt.Errorf("error mounting %s namespace path: %w", string(nsType), err)
	}

	// Undo the bind mount if constructing the Namespace object fails;
	// EINVAL ("not mounted") is ignored.
	defer func() {
		if retErr != nil {
			if err := unix.Unmount(pinnedNamespace, unix.MNT_DETACH); err != nil && !errors.Is(err, unix.EINVAL) {
				logrus.Errorf("Failed umount after failed to pin %s namespace: %v", string(nsType), err)
			}
		}
	}()

	return GetNamespace(pinnedNamespace, nsType)
}
// dirForType returns the sub-directory for that particular NSType
// which is of the form `$namespaceDir/$nsType+"ns"`.
func (mgr *NamespaceManager) dirForType(ns NSType) string {
	return filepath.Join(mgr.namespacesDir, string(ns)+"ns")
}
// NamespacePathFromProc returns the namespace path of type nsType for a given pid and type.
// It returns "" when the proc entry is missing or the path is not a namespace.
func NamespacePathFromProc(nsType NSType, pid int) string {
	// verify nsPath exists on the host. This will prevent us from fatally erroring
	// on network tear down if the path doesn't exist
	// Technically, this is pretty racy, but so is every check using the infra container PID.
	nsPath := fmt.Sprintf("/proc/%d/ns/%s", pid, nsType)
	if _, err := os.Stat(nsPath); err != nil {
		return ""
	}

	// verify the path we found is indeed a namespace
	if err := nspkg.IsNSorErr(nsPath); err != nil {
		return ""
	}

	return nsPath
}
//go:build linux
package nsmgr
import (
"errors"
"fmt"
"os"
"sync"
nspkg "github.com/containernetworking/plugins/pkg/ns"
"github.com/containers/storage/pkg/idtools"
"golang.org/x/sys/unix"
)
// supportedNamespacesForPinning returns a slice of
// the names of namespaces that CRI-O supports
// pinning: net, ipc, uts, user and pid.
func supportedNamespacesForPinning() []NSType {
	return []NSType{NETNS, IPCNS, UTSNS, USERNS, PIDNS}
}
// PodNamespacesConfig describes the namespaces to create for a pod.
type PodNamespacesConfig struct {
	Namespaces []*PodNamespaceConfig // namespaces to create or pin
	IDMappings *idtools.IDMappings   // optional user namespace ID mappings
	Sysctls    map[string]string     // sysctls pinns applies inside the new namespaces
}

// PodNamespaceConfig describes a single namespace to be created or pinned.
type PodNamespaceConfig struct {
	Type NSType // namespace type (net, ipc, user or uts per NewPodNamespaces)
	Host bool   // pin the host namespace instead of creating a new one
	Path string // filled in with the pin path by NewPodNamespaces
}
// namespace is the internal implementation of the Namespace interface.
type namespace struct {
	sync.Mutex        // guards closed (and ns closing) against concurrent Remove calls
	ns     NS         // handle to the open namespace; nil when never opened
	closed bool       // true once the namespace handle has been closed
	nsType NSType     // namespace type (net, ipc, user, pid or uts)
	nsPath string     // bind mount path of the namespace
}

// NS is a wrapper for the containernetworking plugin's NetNS interface
// It exists because while NetNS is specifically called such, it is really a generic
// namespace, and can be used for other namespace types.
type NS interface {
	nspkg.NetNS
}
// Path returns the bind mount path of the namespace, or "" when the
// namespace object or its underlying NS handle is absent.
func (n *namespace) Path() string {
	if n != nil && n.ns != nil {
		return n.nsPath
	}

	return ""
}
// Type returns the namespace type (net, ipc, user, pid or uts).
func (n *namespace) Type() NSType {
	return n.nsType
}
// Remove ensures this namespace is closed and removed.
func (n *namespace) Remove() error {
	n.Lock()
	defer n.Unlock()

	if n.closed {
		// Remove() can be called multiple
		// times without returning an error.
		return nil
	}

	if err := n.ns.Close(); err != nil {
		return err
	}

	// Only mark closed after Close succeeded so a failed Close can be retried.
	n.closed = true

	fp := n.Path()
	if fp == "" {
		return nil
	}

	// Don't run into unmount issues if the network namespace does not exist any more.
	if _, err := os.Stat(fp); err == nil {
		// try to unmount, ignoring "not mounted" (EINVAL) error.
		if err := unix.Unmount(fp, unix.MNT_DETACH); err != nil && !errors.Is(err, unix.EINVAL) {
			return fmt.Errorf("unable to unmount %s: %w", fp, err)
		}

		return os.RemoveAll(fp)
	}

	return nil
}
// GetNamespace takes a path and type, checks if it is a namespace, and if so
// returns an instance of the Namespace interface.
func GetNamespace(nsPath string, nsType NSType) (Namespace, error) {
	ns, err := nspkg.GetNS(nsPath)
	if err != nil {
		// Failed to GetNS. It's possible this is expected (pod is stopped).
		// Return a pre-closed namespace object alongside the error so the
		// caller can still inspect its path and type.
		return &namespace{nsType: nsType, nsPath: nsPath, closed: true}, err
	}

	return &namespace{ns: ns, nsType: nsType, nsPath: nsPath}, nil
}
package rdt
import (
fuzz "github.com/AdaLogics/go-fuzz-headers"
"os"
)
// FuzzLoadConfig exercises Config.Load with fuzz-generated file content.
// It returns 1 when Load was reached and 0 when the input could not even
// be turned into a config file.
func FuzzLoadConfig(data []byte) int {
	c := Config{}
	c.enabled = true
	c.supported = true

	consumer := fuzz.NewConsumer(data)

	confBytes, err := consumer.GetBytes()
	if err != nil {
		return 0
	}

	const path = "rdt_fuzz.config"

	// Clean up unconditionally; Remove on a missing file is harmless.
	defer os.Remove(path)

	// os.WriteFile replaces the previous Create/Write/Close dance and
	// cannot leak an open file handle.
	if err := os.WriteFile(path, confBytes, 0o644); err != nil {
		return 0
	}

	// The Load error is deliberately ignored: fuzzing only cares that
	// Load does not crash on arbitrary input.
	//nolint:errcheck
	c.Load(path)

	return 1
}
package rdt
import (
"fmt"
"log/slog"
"os"
"github.com/intel/goresctrl/pkg/rdt"
"github.com/sirupsen/logrus"
"sigs.k8s.io/yaml"
)
const (
	// DefaultRdtConfigFile is the default value for RDT config file path.
	// Empty means "no config file", which leaves RDT disabled (see Load).
	DefaultRdtConfigFile = ""
	// ResctrlPrefix is the prefix used for class/closid directories under the resctrl filesystem.
	ResctrlPrefix = ""
)
// Config is the global RDT configuration.
type Config struct {
	supported bool        // resctrl initialization succeeded on this host
	enabled   bool        // a config file was successfully loaded via Load
	config    *rdt.Config // the currently active goresctrl configuration
}
// New creates a new RDT config instance.
// Host support is probed by initializing the resctrl filesystem; on failure
// RDT is marked unsupported instead of returning an error (best effort).
func New() *Config {
	c := &Config{
		supported: true,
		config:    &rdt.Config{},
	}

	rdt.SetLogger(slog.Default())

	if err := rdt.Initialize(ResctrlPrefix); err != nil {
		// Previously the reason was silently dropped; keep best-effort
		// behavior but leave a trace of why RDT is unavailable.
		logrus.Debugf("RDT is not supported: %v", err)
		c.supported = false
	}

	return c
}
// Supported returns true if RDT is enabled in the host system.
func (c *Config) Supported() bool {
	return c.supported
}

// Enabled returns true if RDT is enabled in CRI-O.
func (c *Config) Enabled() bool {
	return c.enabled
}
// Load loads and validates RDT config from path, enabling RDT on success.
// An unsupported host or an empty path leaves RDT disabled without error.
func (c *Config) Load(path string) error {
	c.enabled = false

	switch {
	case !c.Supported():
		logrus.Info("RDT not available in the host system")
		return nil
	case path == "":
		logrus.Info("No RDT config file specified, RDT not enabled")
		return nil
	}

	cfg, err := loadConfigFile(path)
	if err != nil {
		return err
	}

	if err := rdt.SetConfig(cfg, true); err != nil {
		return fmt.Errorf("configuring RDT failed: %w", err)
	}

	logrus.Infof("RDT enabled, config successfully loaded from %q", path)
	c.enabled = true
	c.config = cfg

	return nil
}
// loadConfigFile reads the YAML RDT configuration file at path and
// unmarshals it into a goresctrl config.
func loadConfigFile(path string) (*rdt.Config, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("reading rdt config file failed: %w", err)
	}

	cfg := &rdt.Config{}
	if err := yaml.Unmarshal(data, cfg); err != nil {
		return nil, fmt.Errorf("parsing RDT config failed: %w", err)
	}

	return cfg, nil
}
// ContainerClassFromAnnotations resolves the RDT class of a container from
// its own and its pod's annotations. A non-empty class is refused while
// RDT is disabled.
func (c *Config) ContainerClassFromAnnotations(containerName string, containerAnnotations, podAnnotations map[string]string) (string, error) {
	cls, err := rdt.ContainerClassFromAnnotations(containerName, containerAnnotations, podAnnotations)
	if err != nil {
		return "", err
	}

	if cls == "" || c.Enabled() {
		return cls, nil
	}

	return "", fmt.Errorf("RDT disabled, refusing to set RDT class of container %q to %q", containerName, cls)
}
//go:build !(seccomp && linux && cgo)
package seccomp
import (
"context"
"github.com/containers/common/pkg/seccomp"
imagetypes "github.com/containers/image/v5/types"
"github.com/opencontainers/runtime-tools/generate"
types "k8s.io/cri-api/pkg/apis/runtime/v1"
)
// Config is the global seccomp configuration type.
//
// This file provides no-op stubs used when CRI-O is built without seccomp
// support (build constraint: !(seccomp && linux && cgo)).
type Config struct {
	enabled bool // always false in this seccomp-less build
}

// Notifier wraps a seccomp notifier instance for a container.
type Notifier struct{}

// Notification is a seccomp notification which gets sent to the CRI-O server.
type Notification struct{}

// New creates a new default seccomp configuration instance.
// In this build seccomp is permanently disabled.
func New() *Config {
	return &Config{
		enabled: false,
	}
}
// Setup can be used to setup the seccomp profile.
// In this seccomp-less build it is a no-op that returns no notifier, an
// empty profile path and no error.
func (c *Config) Setup(
	ctx context.Context,
	sys *imagetypes.SystemContext,
	msgChan chan Notification,
	containerID, containerName string,
	sandboxAnnotations, imageAnnotations map[string]string,
	specGenerator *generate.Generator,
	profileField *types.SecurityProfile,
	graphRoot string,
) (*Notifier, string, error) {
	return nil, "", nil
}
// SetNotifierPath sets the default path for creating seccomp notifier sockets.
// No-op in this seccomp-less build.
func (c *Config) SetNotifierPath(path string) {
}

// NotifierPath returns the currently used seccomp notifier base path.
// Always empty in this seccomp-less build.
func (c *Config) NotifierPath() string {
	return ""
}

// LoadProfile can be used to load a seccomp profile from the provided path.
// This method will not fail if seccomp is disabled.
func (c *Config) LoadProfile(profilePath string) error {
	return nil
}

// LoadDefaultProfile sets the internal default profile.
// No-op in this seccomp-less build.
func (c *Config) LoadDefaultProfile() error {
	return nil
}
// NewNotifier starts the notifier for the provided arguments.
// Always returns (nil, nil) in this seccomp-less build.
func NewNotifier(
	ctx context.Context,
	msgChan chan Notification,
	containerID, listenerPath string,
	annotationMap map[string]string,
) (*Notifier, error) {
	return nil, nil
}

// Close can be used to close the notifier listener.
// No-op in this seccomp-less build.
func (*Notifier) Close() error {
	return nil
}
// AddSyscall records a syscall on the notifier; no-op in this build.
func (*Notifier) AddSyscall(syscall string) {
}

// UsedSyscalls returns the recorded syscalls; always empty in this build.
func (*Notifier) UsedSyscalls() string {
	return ""
}

// StopContainers reports whether containers should be stopped on
// notification; always false in this build.
func (*Notifier) StopContainers() bool {
	return false
}

// OnExpired registers an expiry callback; no-op in this build.
func (*Notifier) OnExpired(callback func()) {
}

// Ctx returns the notification context; always nil in this build.
func (*Notification) Ctx() context.Context {
	return nil
}

// ContainerID returns the notification's container ID; always empty here.
func (*Notification) ContainerID() string {
	return ""
}

// Syscall returns the notification's syscall; always empty in this build.
func (*Notification) Syscall() string {
	return ""
}

// IsDisabled reports whether seccomp is disabled, which is always the
// case in this seccomp-less build.
func (c *Config) IsDisabled() bool {
	return true
}

// Profile returns the currently loaded seccomp profile; always nil here.
func (c *Config) Profile() *seccomp.Seccomp {
	return nil
}

// DefaultProfile returns the default seccomp profile; always nil here.
func DefaultProfile() *seccomp.Seccomp {
	return nil
}
package ulimits
import (
"fmt"
"strings"
units "github.com/docker/go-units"
)
// Ulimit is a single resource limit in runtime-tools compatible form.
type Ulimit struct {
	Name string // RLIMIT_-prefixed resource name, e.g. "RLIMIT_NOFILE"
	Hard uint64 // hard (maximum) limit
	Soft uint64 // soft (current) limit
}

// Config holds the ulimits parsed at CRI-O startup.
type Config struct {
	ulimits []Ulimit
}
// New creates an empty ulimits Config, ready for LoadUlimits.
func New() *Config {
	return &Config{ulimits: []Ulimit{}}
}
// LoadUlimits parses and stores the given ulimit specifications
// (e.g. "nofile=1024:2048") at CRI-O startup, so a misconfiguration fails
// early. The parsed values are available via Ulimits().
func (c *Config) LoadUlimits(ulimits []string) error {
	for _, spec := range ulimits {
		parsed, err := units.ParseUlimit(spec)
		if err != nil {
			return fmt.Errorf("unrecognized ulimit %s: %w", spec, err)
		}

		rlimit, err := parsed.GetRlimit()
		if err != nil {
			return err
		}

		// The runtime-tools interface requires the RLIMIT_-prefixed name.
		c.ulimits = append(c.ulimits, Ulimit{
			Name: "RLIMIT_" + strings.ToUpper(parsed.Name),
			Hard: rlimit.Hard,
			Soft: rlimit.Soft,
		})
	}

	return nil
}
// Ulimits returns the ulimits collected by LoadUlimits.
// Note: the internal slice is returned directly, not a copy.
func (c *Config) Ulimits() []Ulimit {
	return c.ulimits
}
//go:build linux
// Code in this package is heavily adapted from https://github.com/opencontainers/runc/blob/7362fa2d282feffb9b19911150e01e390a23899d/libcontainer/cgroups/systemd
// Credit goes to the runc authors.
package dbusmgr
import (
"context"
"errors"
"sync"
"syscall"
systemdDbus "github.com/coreos/go-systemd/v22/dbus"
dbus "github.com/godbus/dbus/v5"
)
var (
	dbusC        *systemdDbus.Conn // cached shared connection, lazily created
	dbusMu       sync.RWMutex      // guards all package-level dbus state
	dbusInited   bool              // true once NewDbusConnManager has run
	dbusRootless bool              // whether the shared connection targets the user instance
)

// DbusConnManager is a stateless handle to the package-level shared
// systemd dbus connection.
type DbusConnManager struct{}
// NewDbusConnManager initializes systemd dbus connection manager.
// All managers share the package-level connection state, so mixing rootless
// and rootful managers in one process is a programming error and panics.
func NewDbusConnManager(rootless bool) *DbusConnManager {
	dbusMu.Lock()
	defer dbusMu.Unlock()

	if dbusInited && rootless != dbusRootless {
		panic("can't have both root and rootless dbus")
	}

	dbusRootless = rootless
	dbusInited = true

	return &DbusConnManager{}
}
// GetConnection lazily initializes and returns the shared systemd dbus
// connection using double-checked locking.
func (d *DbusConnManager) GetConnection() (*systemdDbus.Conn, error) {
	// Fast path (dbusC != nil): a read lock suffices and lets concurrent
	// callers acquire the connection at the same time.
	dbusMu.RLock()
	if conn := dbusC; conn != nil {
		dbusMu.RUnlock()
		return conn, nil
	}
	dbusMu.RUnlock()

	// Slow path (dbusC == nil): take the write lock so only one caller
	// creates the connection, and re-check in case another caller won.
	dbusMu.Lock()
	defer dbusMu.Unlock()

	if conn := dbusC; conn != nil {
		return conn, nil
	}

	conn, err := d.newConnection()
	if err != nil {
		return nil, err
	}

	dbusC = conn

	return conn, nil
}
// newConnection opens a fresh systemd dbus connection, either to the user
// instance (rootless mode) or to the system instance.
func (d *DbusConnManager) newConnection() (*systemdDbus.Conn, error) {
	if !dbusRootless {
		return systemdDbus.NewWithContext(context.TODO())
	}

	return newUserSystemdDbus()
}
// RetryOnDisconnect calls op, and if the error it returns is about closed dbus
// connection, the connection is re-established and the op is retried. This helps
// with the situation when dbus is restarted and we have a stale connection.
func (d *DbusConnManager) RetryOnDisconnect(op func(*systemdDbus.Conn) error) error {
	for {
		conn, err := d.GetConnection()
		if err != nil {
			return err
		}

		err = op(conn)
		if err == nil {
			return nil
		}

		// EAGAIN is transient; retry the op on the same connection.
		if errors.Is(err, syscall.EAGAIN) {
			continue
		}

		// Any error other than a closed connection is final.
		if !errors.Is(err, dbus.ErrClosed) {
			return err
		}

		// dbus connection closed, we should reconnect and try again
		d.resetConnection(conn)
	}
}
// resetConnection resets the connection to its initial state
// (so it can be reconnected if necessary).
// The cached connection is only dropped if it is still the one the caller
// observed, to avoid discarding a connection a concurrent caller re-created.
func (d *DbusConnManager) resetConnection(conn *systemdDbus.Conn) {
	dbusMu.Lock()
	defer dbusMu.Unlock()

	if dbusC != nil && dbusC == conn {
		dbusC.Close()
		dbusC = nil
	}
}
// Code in this package is heavily adapted from https://github.com/opencontainers/runc/blob/7362fa2d282feffb9b19911150e01e390a23899d/libcontainer/cgroups/systemd
// Credit goes to the runc authors.
package dbusmgr
import (
"bufio"
"bytes"
"errors"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
systemdDbus "github.com/coreos/go-systemd/v22/dbus"
dbus "github.com/godbus/dbus/v5"
"github.com/moby/sys/userns"
"github.com/cri-o/cri-o/utils/cmdrunner"
)
// newUserSystemdDbus creates a connection for systemd user-instance.
func newUserSystemdDbus() (*systemdDbus.Conn, error) {
	addr, err := DetectUserDbusSessionBusAddress()
	if err != nil {
		return nil, err
	}

	uid, err := DetectUID()
	if err != nil {
		return nil, err
	}

	return systemdDbus.NewConnection(func() (*dbus.Conn, error) {
		conn, err := dbus.Dial(addr)
		if err != nil {
			return nil, fmt.Errorf("error while dialing %q: %w", addr, err)
		}

		// Authenticate as the detected user via the EXTERNAL mechanism.
		methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(uid))}

		err = conn.Auth(methods)
		if err != nil {
			conn.Close()
			return nil, fmt.Errorf("error while authenticating connection, address=%q, UID=%d: %w", addr, uid, err)
		}

		if err = conn.Hello(); err != nil {
			conn.Close()
			return nil, fmt.Errorf("error while sending Hello message, address=%q, UID=%d: %w", addr, uid, err)
		}

		return conn, nil
	})
}
// DetectUID detects UID from the OwnerUID field of `busctl --user status`
// if running in userNS. The value corresponds to sd_bus_creds_get_owner_uid(3) .
//
// Otherwise returns os.Getuid() .
func DetectUID() (int, error) {
	if !userns.RunningInUserNS() {
		return os.Getuid(), nil
	}

	b, err := cmdrunner.Command("busctl", "--user", "--no-pager", "status").CombinedOutput()
	if err != nil {
		return -1, fmt.Errorf("could not execute `busctl --user --no-pager status`: %q: %w", string(b), err)
	}

	scanner := bufio.NewScanner(bytes.NewReader(b))
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())

		after, ok := strings.CutPrefix(line, "OwnerUID=")
		if !ok {
			continue
		}

		uid, err := strconv.Atoi(after)
		if err != nil {
			return -1, fmt.Errorf("could not detect the OwnerUID: %s: %w", line, err)
		}

		return uid, nil
	}

	if err := scanner.Err(); err != nil {
		return -1, err
	}

	return -1, errors.New("could not detect the OwnerUID")
}
// DetectUserDbusSessionBusAddress returns $DBUS_SESSION_BUS_ADDRESS if set.
// Otherwise returns "unix:path=$XDG_RUNTIME_DIR/bus" if $XDG_RUNTIME_DIR/bus exists.
// Otherwise parses the value from `systemctl --user show-environment` .
func DetectUserDbusSessionBusAddress() (string, error) {
	if env := os.Getenv("DBUS_SESSION_BUS_ADDRESS"); env != "" {
		return env, nil
	}

	if xdr := os.Getenv("XDG_RUNTIME_DIR"); xdr != "" {
		busPath := filepath.Join(xdr, "bus")
		if _, err := os.Stat(busPath); err == nil {
			busAddress := "unix:path=" + busPath

			return busAddress, nil
		}
	}

	b, err := cmdrunner.Command("systemctl", "--user", "--no-pager", "show-environment").CombinedOutput()
	if err != nil {
		return "", fmt.Errorf("could not execute `systemctl --user --no-pager show-environment`, output=%q: %w", string(b), err)
	}

	scanner := bufio.NewScanner(bytes.NewReader(b))
	for scanner.Scan() {
		s := strings.TrimSpace(scanner.Text())
		if after, ok := strings.CutPrefix(s, "DBUS_SESSION_BUS_ADDRESS="); ok {
			return after, nil
		}
	}

	// Surface scanner errors instead of silently reporting a missing
	// variable; this mirrors the handling in DetectUID, which the
	// original version of this function omitted.
	if err := scanner.Err(); err != nil {
		return "", err
	}

	return "", errors.New("could not detect DBUS_SESSION_BUS_ADDRESS from `systemctl --user --no-pager show-environment`. Make sure you have installed the dbus-user-session or dbus-daemon package and then run: `systemctl --user start dbus`")
}
package log
import (
"fmt"
"github.com/sirupsen/logrus"
)
// RemoveHook removes every hook whose concrete type is named `*log.<name>`
// from the given logger, keeping all other hooks in place.
func RemoveHook(logger *logrus.Logger, name string) {
	target := "*log." + name
	kept := make(logrus.LevelHooks)

	for level, hooks := range logger.Hooks {
		for _, hook := range hooks {
			if fmt.Sprintf("%T", hook) == target {
				continue
			}

			kept[level] = append(kept[level], hook)
		}
	}

	logger.ReplaceHooks(kept)
}
package log
import (
"fmt"
"runtime"
"strings"
"github.com/sirupsen/logrus"
)
// FileNameHook is a logrus hook that adds the calling file, function and
// line number as an entry field (see Fire and wrapper.Format).
type FileNameHook struct {
	field      string           // entry field name the caller info is stored in
	skipPrefix []string         // file prefixes treated as logging infrastructure frames
	formatter  logrus.Formatter // cached wrapper around the logger's formatter
	// Formatter renders the field value from the resolved caller location.
	Formatter func(file, function string, line int) string
}

// wrapper decorates an existing logrus formatter with the caller field.
type wrapper struct {
	old  logrus.Formatter // the formatter being wrapped
	hook *FileNameHook    // hook providing field name and caller lookup
}
// NewFilenameHook creates a new default FileNameHook, storing the caller
// as a "file" field rendered as "file:line".
func NewFilenameHook() *FileNameHook {
	h := &FileNameHook{
		field:      "file",
		skipPrefix: []string{"log/", "logrus/", "logrus@"},
	}
	h.Formatter = func(file, function string, line int) string {
		return fmt.Sprintf("%s:%d", file, line)
	}

	return h
}
// Levels returns the levels for which the hook is activated. This contains
// currently only the DebugLevel.
func (f *FileNameHook) Levels() []logrus.Level {
	return []logrus.Level{logrus.DebugLevel}
}
// Fire executes the hook for every logrus entry.
// It swaps the logger's formatter for a wrapper that injects the caller
// field, rebuilding the wrapper whenever the underlying formatter changed.
func (f *FileNameHook) Fire(entry *logrus.Entry) error {
	if f.formatter != entry.Logger.Formatter {
		f.formatter = &wrapper{entry.Logger.Formatter, f}
	}

	entry.Logger.Formatter = f.formatter

	return nil
}
// Format returns the log format including the caller as field.
func (w *wrapper) Format(entry *logrus.Entry) ([]byte, error) {
	// WithField yields a new entry; carry level and message over to it
	// explicitly before delegating to the wrapped formatter.
	field := entry.WithField(
		w.hook.field,
		w.hook.Formatter(w.hook.findCaller()),
	)
	field.Level = entry.Level
	field.Message = entry.Message

	return w.old.Format(field)
}
// findCaller returns the file, function and line number for the current call.
func (f *FileNameHook) findCaller() (file, function string, line int) {
	var pc uintptr

	// The maximum amount of frames to be iterated
	const maxFrames = 10

	for i := range maxFrames {
		// The amount of frames to be skipped to land at the actual caller
		const skipFrames = 6

		// Walk outwards until the frame no longer belongs to the
		// logging infrastructure (see skipPrefix).
		pc, file, line = caller(skipFrames + i)
		if !f.shouldSkipPrefix(file) {
			break
		}
	}

	if pc != 0 {
		// Resolve the function name for the selected program counter.
		frames := runtime.CallersFrames([]uintptr{pc})
		frame, _ := frames.Next()
		function = frame.Function
	}

	return file, function, line
}
// caller reports file and line number information about function invocations
// on the calling goroutine's stack. The argument skip is the number of stack
// frames to ascend, with 0 identifying the caller of Caller. The reported
// file path is trimmed to at most its last two path components.
func caller(skip int) (pc uintptr, file string, line int) {
	pc, file, line, ok := runtime.Caller(skip)
	if !ok {
		return 0, "", 0
	}

	// Keep only the final "dir/file" portion of the path; a leading
	// slash at index 0 does not count as a separator, matching the
	// previous index-scanning implementation.
	if idx := strings.LastIndex(file, "/"); idx > 0 {
		if idx = strings.LastIndex(file[:idx], "/"); idx > 0 {
			file = file[idx+1:]
		}
	}

	return pc, file, line
}
// shouldSkipPrefix reports whether the given file path starts with one of
// the configured skip prefixes (logging infrastructure frames).
func (f *FileNameHook) shouldSkipPrefix(file string) bool {
	for _, prefix := range f.skipPrefix {
		if strings.HasPrefix(file, prefix) {
			return true
		}
	}

	return false
}
package log
import (
"fmt"
"io"
"regexp"
"github.com/sirupsen/logrus"
)
// FilterHook is a logrus hook that filters log entries: messages not
// matching the custom regexp are discarded, and debug messages have
// bracketed number runs masked by the predefined regexp (see Fire).
type FilterHook struct {
	custom     *regexp.Regexp // optional user-supplied message filter
	predefined *regexp.Regexp // matches `[digits/spaces]` runs to redact
}
// NewFilterHook creates a new default FilterHook.
// filter, when non-empty, must be a valid regular expression; entries whose
// message does not match it will be discarded by the hook.
func NewFilterHook(filter string) (*FilterHook, error) {
	var (
		custom *regexp.Regexp
		err    error
	)

	if filter != "" {
		custom, err = regexp.Compile(filter)
		if err != nil {
			return nil, fmt.Errorf("custom log level filter does not compile: %w", err)
		}
		// Log only after a successful compile; the original logged
		// before the error check and could print a nil regexp.
		logrus.Debugf("Using log filter: %q", custom)
	}

	predefined := regexp.MustCompile(`\[[\d\s]+\]`)

	return &FilterHook{custom, predefined}, nil
}
// Levels returns the levels for which the hook is activated.
// The filter applies to all log levels.
func (f *FilterHook) Levels() []logrus.Level {
	return logrus.AllLevels
}
// Fire executes the hook for every logrus entry.
func (f *FilterHook) Fire(entry *logrus.Entry) error {
	// Custom specified filters get skipped completely: non-matching
	// entries are redirected to a discarding logger so nothing is printed.
	if f.custom != nil && !f.custom.MatchString(entry.Message) {
		*entry = logrus.Entry{
			Logger: &logrus.Logger{
				Out:       io.Discard,
				Formatter: &logrus.JSONFormatter{},
			},
		}
	}

	// Apply pre-defined filters: mask bracketed digit/space runs in
	// debug messages with "[FILTERED]".
	if entry.Level == logrus.DebugLevel {
		entry.Message = f.predefined.ReplaceAllString(entry.Message, "[FILTERED]")
	}

	return nil
}
package log
import (
"fmt"
"strings"
"github.com/go-logr/logr"
"github.com/sirupsen/logrus"
"k8s.io/klog/v2"
)
// InitKlogShim creates a shim between logrus and klog by forwarding klog
// messages to the logrus logger. To reduce the overall verbosity we log every
// Info klog message in logrus Debug verbosity.
func InitKlogShim() {
	// Disable klog's own stderr output before installing the logrus-backed sink.
	klog.LogToStderr(false)
	klog.SetLogger(logr.New(&logSink{}))
}
// logSink implements logr.LogSink by forwarding messages to logrus.
type logSink struct{}

// Info forwards a klog info message to logrus at debug verbosity,
// appending any key/value pairs in parentheses.
func (l *logSink) Info(level int, msg string, keysAndValues ...any) {
	res := &strings.Builder{}
	res.WriteString(msg)
	writeKeysAndValues(res, keysAndValues...)
	logrus.Debug(res.String())
}
// Error forwards a klog error message to logrus at error verbosity,
// appending the error text and any key/value pairs.
func (l *logSink) Error(err error, msg string, keysAndValues ...any) {
	var sb strings.Builder
	sb.WriteString(msg)

	if err != nil {
		sb.WriteString(": ")
		sb.WriteString(err.Error())
	}

	writeKeysAndValues(&sb, keysAndValues...)
	logrus.Error(sb.String())
}
func writeKeysAndValues(b *strings.Builder, keysAndValues ...any) {
if len(keysAndValues) == 0 {
return
}
const missingValue = "[MISSING]"
b.WriteString(" (")
for i := 0; i < len(keysAndValues); i += 2 {
var v any
k := keysAndValues[i]
if i+1 < len(keysAndValues) {
v = keysAndValues[i+1]
} else {
v = missingValue
}
if i > 0 {
b.WriteByte(' ')
}
switch v.(type) {
case string, error:
fmt.Fprintf(b, "%s=%q", k, v)
case []byte:
fmt.Fprintf(b, "%s=%+q", k, v)
default:
if _, ok := v.(fmt.Stringer); ok {
fmt.Fprintf(b, "%s=%q", k, v)
} else {
fmt.Fprintf(b, "%s=%+v", k, v)
}
}
}
b.WriteByte(')')
}
// Init implements logr.LogSink; no runtime info is needed.
func (l *logSink) Init(logr.RuntimeInfo) {}

// Enabled implements logr.LogSink; every verbosity level is forwarded.
func (l *logSink) Enabled(int) bool { return true }

// WithValues implements logr.LogSink; additional values are ignored.
func (l *logSink) WithValues(...any) logr.LogSink { return l }

// WithName implements logr.LogSink; names are ignored.
func (l *logSink) WithName(string) logr.LogSink { return l }
// Package log provides a global interface to logging functionality
package log
import (
"context"
"runtime"
"github.com/sirupsen/logrus"
"go.opentelemetry.io/otel/trace"
)
type (
	// ID is the context key whose string value is logged as the "id" field.
	ID struct{}
	// Name is the context key whose string value is logged as the "name" field.
	Name struct{}
)
// Tracef logs a formatted trace-level message via the context-derived entry.
func Tracef(ctx context.Context, format string, args ...any) {
	entry(ctx).Tracef(format, args...)
}

// Debugf logs a formatted debug-level message via the context-derived entry.
func Debugf(ctx context.Context, format string, args ...any) {
	entry(ctx).Debugf(format, args...)
}

// Infof logs a formatted info-level message via the context-derived entry.
func Infof(ctx context.Context, format string, args ...any) {
	entry(ctx).Infof(format, args...)
}

// Warnf logs a formatted warn-level message via the context-derived entry.
func Warnf(ctx context.Context, format string, args ...any) {
	entry(ctx).Warnf(format, args...)
}

// Errorf logs a formatted error-level message via the context-derived entry.
func Errorf(ctx context.Context, format string, args ...any) {
	entry(ctx).Errorf(format, args...)
}

// Fatalf logs a formatted fatal-level message via the context-derived entry.
func Fatalf(ctx context.Context, format string, args ...any) {
	entry(ctx).Fatalf(format, args...)
}

// WithFields returns the context-derived entry extended with fields.
func WithFields(ctx context.Context, fields map[string]any) *logrus.Entry {
	return entry(ctx).WithFields(fields)
}
// entry builds a logrus entry for ctx. When the context carries both an ID
// and a Name string value, they are attached as the "id" and "name" fields;
// a nil context yields a plain entry without context.
func entry(ctx context.Context) *logrus.Entry {
	logger := logrus.StandardLogger()

	if ctx == nil {
		return logrus.NewEntry(logger)
	}

	if id, ok := ctx.Value(ID{}).(string); ok {
		if name, ok := ctx.Value(Name{}).(string); ok {
			return logger.WithField("id", id).WithField("name", name).WithContext(ctx)
		}
	}

	return logrus.NewEntry(logger).WithContext(ctx)
}
// StartSpan starts a new tracing span named after the fully-qualified
// function that called StartSpan, or "unknown" when the caller cannot be
// resolved. Note that runtime.Caller(1) resolves the *direct* caller, so
// wrapping this function in a helper would change the reported span name.
func StartSpan(ctx context.Context) (context.Context, trace.Span) {
	spanName := "unknown"
	// Use function signature as a span name if available
	if pc, _, _, ok := runtime.Caller(1); ok {
		spanName = runtime.FuncForPC(pc).Name()
	} else {
		Debugf(ctx, "Unable to retrieve a caller when starting span")
	}
	//nolint:spancheck // see https://github.com/jjti/go-spancheck/issues/7
	return trace.SpanFromContext(ctx).TracerProvider().Tracer("").Start(ctx, spanName)
}
package references
import (
"fmt"
"github.com/containers/image/v5/docker/reference"
)
// RegistryImageReference is a name of a specific image location on a registry.
// The image may or may not exist, and, in general, what image the name points to may change
// over time.
//
// More specifically:
// - The name always specifies a registry; it is not an alias nor a short name input to a search
// - The name contains exactly one of a tag or a digest; it does not specify just a repo.
//
// This is intended to be a value type; if a value exists, it contains a valid reference.
type RegistryImageReference struct {
	// privateNamed is INTENTIONALLY ENCAPSULATED to provide strong type safety and strong syntax/semantics guarantees.
	// Use typed values, not strings, everywhere it is even remotely possible.
	privateNamed reference.Named // Satisfies !reference.IsNameOnly
}
// RegistryImageReferenceFromRaw is an internal constructor of a RegistryImageReference.
//
// This should only be called from internal/storage.
// It will modify the reference if both digest and tag are specified, stripping the tag and leaving the digest.
// It also verifies that the image is not only a name; if it is only a name, the function panics.
func RegistryImageReferenceFromRaw(rawNamed reference.Named) RegistryImageReference {
	_, isTagged := rawNamed.(reference.NamedTagged)
	canonical, isDigested := rawNamed.(reference.Canonical)
	// Strip the tag from ambiguous image references that have a
	// digest as well (e.g. `image:tag@sha256:123...`). Such
	// image references are supported by docker but, due to their
	// ambiguity, explicitly not by containers/image.
	if isTagged && isDigested {
		canonical, err := reference.WithDigest(reference.TrimNamed(rawNamed), canonical.Digest())
		if err != nil {
			panic("internal error, reference.WithDigest was not passed a digest, which should not be possible")
		}
		rawNamed = canonical
	}
	// Ideally this would be better encapsulated, e.g. in internal/storage/internal, but
	// that would require using a type defined with the internal package with a public alias,
	// and as of 2023-10 mockgen creates code that refers to the internal target of the alias,
	// which doesn’t compile.
	if reference.IsNameOnly(rawNamed) {
		// The panic message previously referenced a stale function name
		// ("NewRegistryImageReference"); it now names this constructor.
		panic(fmt.Sprintf("internal error, RegistryImageReferenceFromRaw with a NameOnly %q", rawNamed.String()))
	}
	return RegistryImageReference{privateNamed: rawNamed}
}
// ParseRegistryImageReferenceFromOutOfProcessData constructs a RegistryImageReference from a string.
//
// It is only intended for communication with OUT-OF-PROCESS APIs,
// like registry references provided by CRI by Kubelet.
// It will modify the reference if both digest and tag are specified, stripping the tag and leaving the digest.
// It also verifies that the image is not only a name; if it is only a name, the `latest` tag will be added.
func ParseRegistryImageReferenceFromOutOfProcessData(input string) (RegistryImageReference, error) {
	// Alternatively, should we provide two parsers, one with docker.io/library and :latest defaulting,
	// and one only accepting fully-specified reference.Named.String() values?
	ref, err := reference.ParseNormalizedNamed(input)
	if err != nil {
		return RegistryImageReference{}, err
	}
	// Add the default :latest tag when the input named only a repo.
	ref = reference.TagNameOnly(ref)
	return RegistryImageReferenceFromRaw(ref), nil
}
// ensureInitialized panics if ref is the zero value, i.e. was not produced
// by one of the package constructors.
func (ref RegistryImageReference) ensureInitialized() {
	// It’s deeply disappointing that we need to check this at runtime, instead of just
	// requiring a constructor to be called.
	if ref.privateNamed == nil {
		panic("internal error, use of an uninitialized RegistryImageReference")
	}
}
// StringForOutOfProcessConsumptionOnly is only intended for communication with OUT-OF-PROCESS APIs,
// like image names in CRI status objects.
//
// RegistryImageReference intentionally does not implement String(). Use typed values wherever possible.
// Panics if ref is uninitialized.
func (ref RegistryImageReference) StringForOutOfProcessConsumptionOnly() string {
	ref.ensureInitialized()
	return ref.privateNamed.String()
}
// Format() is implemented so that log entries can be written, without providing a convenient String() method.
// It satisfies fmt.Formatter, so %v/%s/%q and friends apply to the underlying reference string.
func (ref RegistryImageReference) Format(f fmt.State, verb rune) {
	ref.ensureInitialized()
	fmt.Fprintf(f, fmt.FormatString(f, verb), ref.privateNamed.String())
}
// Registry returns the host[:port] part of the reference.
// Panics if ref is uninitialized.
func (ref RegistryImageReference) Registry() string {
	ref.ensureInitialized()
	return reference.Domain(ref.privateNamed)
}
// Raw returns the underlying reference.Named.
//
// The return value is !IsNameOnly, and the repo is registry-qualified.
//
// This should only be called from internal/storage.
func (ref RegistryImageReference) Raw() reference.Named {
	// See the comment in RegistryImageReferenceFromRaw about better encapsulation.
	ref.ensureInitialized()
	return ref.privateNamed
}
package version
import (
"errors"
"fmt"
"os"
"path/filepath"
"reflect"
"runtime"
"runtime/debug"
"strconv"
"strings"
"text/tabwriter"
"github.com/blang/semver/v4"
"github.com/containers/common/pkg/apparmor"
"github.com/containers/common/pkg/seccomp"
"github.com/google/renameio"
json "github.com/json-iterator/go"
"github.com/sirupsen/logrus"
)
// Version is the version of the build.
const Version = "1.35.0"

// ReleaseMinorVersions are the currently supported minor versions.
var ReleaseMinorVersions = []string{"1.34", "1.33", "1.32"}

// Variables injected during build-time via -ldflags.
var (
	buildDate string // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
)
// Info collects version, build and feature information about this binary.
// All fields serialize to JSON; optional fields are omitted when empty.
// SeccompEnabled and AppArmorEnabled reflect runtime feature detection
// (see Get), not build-time configuration, and are always serialized.
type Info struct {
	Version         string   `json:"version,omitempty"`
	GitCommit       string   `json:"gitCommit,omitempty"`
	GitCommitDate   string   `json:"gitCommitDate,omitempty"`
	GitTreeState    string   `json:"gitTreeState,omitempty"`
	BuildDate       string   `json:"buildDate,omitempty"`
	GoVersion       string   `json:"goVersion,omitempty"`
	Compiler        string   `json:"compiler,omitempty"`
	Platform        string   `json:"platform,omitempty"`
	Linkmode        string   `json:"linkmode,omitempty"`
	BuildTags       []string `json:"buildTags,omitempty"`
	LDFlags         string   `json:"ldFlags,omitempty"`
	SeccompEnabled  bool     `json:"seccompEnabled"`
	AppArmorEnabled bool     `json:"appArmorEnabled"`
	Dependencies    []string `json:"dependencies,omitempty"`
}
// ShouldCrioWipe opens the version file and parses it along with the current
// version constant. If there is a parsing error, crio should wipe, and the
// error is returned. On success it reports whether the stored major/minor
// version differs from the current one; if they differ, crio should wipe.
func ShouldCrioWipe(versionFileName string) (bool, error) {
	return shouldCrioWipe(versionFileName, Version)
}
// shouldCrioWipe is an internal function for testing purposes. It compares
// the version recorded in versionFileName against versionString and reports
// whether a wipe is required. Any read or parse error also requests a wipe
// (and is returned). An empty file name disables the check entirely.
func shouldCrioWipe(versionFileName, versionString string) (bool, error) {
	if versionFileName == "" {
		return false, nil
	}
	versionBytes, err := os.ReadFile(versionFileName)
	if err != nil {
		return true, err
	}
	// The version laid down by a previous invocation of crio.
	var prev semver.Version
	if err := prev.UnmarshalJSON(versionBytes); err != nil {
		return true, fmt.Errorf("version file %s malformatted: %w", versionFileName, err)
	}
	// The version of the current binary.
	curr, err := parseVersionConstant(versionString, "")
	if err != nil {
		return true, fmt.Errorf("version constant %s malformatted: %w", versionString, err)
	}
	// Whenever major or minor differ we want to perform a {down,up}grade wipe.
	// The common case is curr > prev, but even in the opposite case images
	// are out of date and could be wiped.
	wipe := curr.Major != prev.Major || curr.Minor != prev.Minor
	return wipe, nil
}
// WriteVersionFile writes the version information to the given file, which
// is the location of the old version file. The receiver's git commit is
// included to aid in debugging, but is not used to compare versions.
func (i *Info) WriteVersionFile(file string) error {
	return writeVersionFile(file, i.GitCommit, Version)
}
// LogVersion logs the version and git information of this build at info level.
func (i *Info) LogVersion() {
	logrus.Infof("Starting CRI-O, version: %s, git: %v(%s)", Version, i.GitCommit, i.GitTreeState)
}
// writeVersionFile is an internal function for testing purposes. It renders
// version (with gitCommit attached as semver build metadata) as JSON and
// atomically writes it to file, creating the parent directory when needed.
// An empty file name is a no-op.
func writeVersionFile(file, gitCommit, version string) error {
	if file == "" {
		return nil
	}
	current, err := parseVersionConstant(version, gitCommit)
	if err != nil {
		// Sanity check - this should never happen for a well-formed constant.
		return err
	}
	payload, err := current.MarshalJSON()
	if err != nil {
		// Sanity check - semver versions marshal cleanly.
		return err
	}
	// Create the top level directory if it doesn't exist.
	if err := os.MkdirAll(filepath.Dir(file), 0o755); err != nil {
		return err
	}
	return renameio.WriteFile(file, payload, 0o644)
}
// parseVersionConstant parses the Version variable above.
// Ideally a const crioVersion would be kept, but golang doesn't support
// const structs, so we instead spend some runtime on CRI-O startup.
// Because the version string doesn't keep track of the git commit,
// but it could be useful for debugging, we pass it in here.
// If our version constant is properly formatted, this should never error.
func parseVersionConstant(versionString, gitCommit string) (*semver.Version, error) {
	v, err := semver.Make(versionString)
	if err != nil {
		return nil, err
	}
	if gitCommit != "" {
		gitBuild, err := semver.NewBuildVersion(strings.Trim(gitCommit, "\""))
		// A malformed commit string is silently ignored: the build metadata
		// is helpful for debugging but not required.
		if err == nil {
			v.Build = append(v.Build, gitBuild)
		}
	}
	return &v, nil
}
// Get assembles the version Info of this binary from its embedded build
// information. When verbose is set, the module dependency list is included
// as "path version sum" strings. It errors only when build info is missing.
func Get(verbose bool) (*Info, error) {
	buildInfo, ok := debug.ReadBuildInfo()
	if !ok {
		return nil, errors.New("unable to retrieve build info")
	}
	const unknown = "unknown"
	gitCommit, gitCommitDate, ldFlags := unknown, unknown, unknown
	gitTreeState := "clean"
	buildTags := []string{}
	for _, setting := range buildInfo.Settings {
		switch setting.Key {
		case "vcs.revision":
			gitCommit = setting.Value
		case "vcs.modified":
			if setting.Value == "true" {
				gitTreeState = "dirty"
			}
		case "vcs.time":
			gitCommitDate = setting.Value
		case "-tags":
			buildTags = strings.Split(setting.Value, ",")
		case "-ldflags":
			ldFlags = setting.Value
		}
	}
	dependencies := []string{}
	if verbose {
		for _, dep := range buildInfo.Deps {
			dependencies = append(
				dependencies,
				fmt.Sprintf("%s %s %s", dep.Path, dep.Version, dep.Sum),
			)
		}
	}
	return &Info{
		Version:         Version,
		GitCommit:       gitCommit,
		GitCommitDate:   gitCommitDate,
		GitTreeState:    gitTreeState,
		BuildDate:       buildDate,
		GoVersion:       runtime.Version(),
		Compiler:        runtime.Compiler,
		Platform:        fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
		Linkmode:        linkmode,
		BuildTags:       buildTags,
		LDFlags:         ldFlags,
		SeccompEnabled:  seccomp.IsEnabled(),
		AppArmorEnabled: apparmor.IsEnabled(),
		Dependencies:    dependencies,
	}, nil
}
// String returns a human-readable, tab-aligned rendering of the version
// info. Fields whose value is empty are omitted; []string fields are printed
// one item per indented line.
func (i *Info) String() string {
	sb := strings.Builder{}
	w := tabwriter.NewWriter(&sb, 0, 0, 2, ' ', 0)
	v := reflect.ValueOf(*i)
	t := v.Type()
	// Loop variable is named idx to avoid shadowing the receiver i.
	for idx := range t.NumField() {
		field := t.Field(idx)
		value := v.FieldByName(field.Name)
		text := ""
		multiLine := false
		switch field.Type.Kind() {
		case reflect.Bool:
			text = strconv.FormatBool(value.Bool())
		case reflect.Slice:
			// Only expecting []string here; ignore other slices.
			if s, ok := value.Interface().([]string); ok {
				const sep = "\n  "
				text = sep + strings.Join(s, sep)
			}
			multiLine = true
		case reflect.String:
			text = value.String()
		}
		if strings.TrimSpace(text) == "" {
			continue
		}
		fmt.Fprintf(w, "%s:", field.Name)
		if multiLine {
			fmt.Fprint(w, text)
		} else {
			fmt.Fprintf(w, "\t%s", text)
		}
		if idx+1 < t.NumField() {
			fmt.Fprint(w, "\n")
		}
	}
	// Flush cannot fail when writing into a strings.Builder.
	w.Flush()
	return sb.String()
}
// JSONString returns the indented JSON representation of the version info.
func (i *Info) JSONString() (string, error) {
	b, err := json.MarshalIndent(i, "", "  ")
	if err != nil {
		return "", err
	}
	return string(b), nil
}
package config
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net"
"os"
"os/exec"
"path/filepath"
"regexp"
"slices"
"strings"
"time"
"github.com/BurntSushi/toml"
"github.com/containers/common/pkg/hooks"
conmonrsClient "github.com/containers/conmon-rs/pkg/client"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
cpConfig "github.com/cri-o/crio-credential-provider/pkg/config"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/go-units"
"github.com/opencontainers/runtime-spec/specs-go/features"
selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/sirupsen/logrus"
"k8s.io/utils/cpuset"
"k8s.io/utils/ptr"
"tags.cncf.io/container-device-interface/pkg/cdi"
"github.com/cri-o/cri-o/internal/config/apparmor"
"github.com/cri-o/cri-o/internal/config/blockio"
"github.com/cri-o/cri-o/internal/config/capabilities"
"github.com/cri-o/cri-o/internal/config/cgmgr"
"github.com/cri-o/cri-o/internal/config/cnimgr"
"github.com/cri-o/cri-o/internal/config/conmonmgr"
"github.com/cri-o/cri-o/internal/config/device"
"github.com/cri-o/cri-o/internal/config/node"
"github.com/cri-o/cri-o/internal/config/nri"
"github.com/cri-o/cri-o/internal/config/nsmgr"
"github.com/cri-o/cri-o/internal/config/rdt"
"github.com/cri-o/cri-o/internal/config/seccomp"
"github.com/cri-o/cri-o/internal/config/ulimits"
"github.com/cri-o/cri-o/internal/log"
"github.com/cri-o/cri-o/internal/storage/references"
"github.com/cri-o/cri-o/pkg/annotations"
"github.com/cri-o/cri-o/server/metrics/collectors"
"github.com/cri-o/cri-o/server/useragent"
"github.com/cri-o/cri-o/utils"
"github.com/cri-o/cri-o/utils/cmdrunner"
)
// Defaults if none are specified.
const (
	// defaultGRPCMaxMsgSize is the maximum gRPC message size in bytes (80 MiB).
	defaultGRPCMaxMsgSize = 80 * 1024 * 1024
	// default minimum memory for all other runtimes.
	defaultContainerMinMemory = 12 * 1024 * 1024 // 12 MiB
	// minimum memory for crun, the default runtime.
	defaultContainerMinMemoryCrun = 500 * 1024 // 500 KiB
	// OCIBufSize is a buffer size in bytes; see its usages for details.
	OCIBufSize = 8192
	// RuntimeTypeVM is the runtime_type value selecting a VM-based runtime.
	RuntimeTypeVM = "vm"
	// RuntimeTypePod is the runtime_type value selecting conmon-rs ("pod").
	RuntimeTypePod = "pod"
	// defaultCtrStopTimeout is expressed in seconds.
	defaultCtrStopTimeout = 30 // seconds
	defaultNamespacesDir  = "/var/run"
	// RuntimeTypeVMBinaryPattern matches containerd shim v2 binary names.
	RuntimeTypeVMBinaryPattern = "containerd-shim-([a-zA-Z0-9\\-\\+])+-v2"
	// tasksetBinary is the name of the taskset executable.
	tasksetBinary = "taskset"
	// MonitorExecCgroupDefault is the default (unset) monitor_exec_cgroup value.
	MonitorExecCgroupDefault = ""
	// MonitorExecCgroupContainer moves exec probes into the container's cgroup.
	MonitorExecCgroupContainer = "container"
)
// Config represents the entire set of configuration values that can be set for
// the server. This is intended to be loaded from a toml-encoded config file.
// The embedded structs correspond to the individual TOML config tables.
type Config struct {
	Comment          string
	singleConfigPath string // Path to the single config file
	dropInConfigDir  string // Path to the drop-in config files
	RootConfig
	APIConfig
	RuntimeConfig
	ImageConfig
	NetworkConfig
	MetricsConfig
	TracingConfig
	StatsConfig
	NRI           *nri.Config
	SystemContext *types.SystemContext
}
// Iface provides a config interface for data encapsulation.
type Iface interface {
	// GetStore returns the container storage for the configuration.
	GetStore() (storage.Store, error)
	// GetData returns the underlying Config.
	GetData() *Config
}
// GetData returns the Config of an Iface.
func (c *Config) GetData() *Config {
	return c
}
// ImageVolumesType describes image volume handling strategies.
type ImageVolumesType string

const (
	// ImageVolumesMkdir option is for using mkdir to handle image volumes.
	ImageVolumesMkdir ImageVolumesType = "mkdir"
	// ImageVolumesIgnore option is for ignoring image volumes altogether.
	ImageVolumesIgnore ImageVolumesType = "ignore"
	// ImageVolumesBind option is for using bind mounted volumes.
	// (The doc comment existed without a declaration; the constant is restored.)
	ImageVolumesBind ImageVolumesType = "bind"
)
const (
	// DefaultPidsLimit is the default value for maximum number of processes
	// allowed inside a container.
	DefaultPidsLimit = -1
	// DefaultLogSizeMax is the default value for the maximum log size
	// allowed for a container. Negative values mean that no limit is imposed.
	DefaultLogSizeMax = -1
)

const (
	// DefaultBlockIOConfigFile is the default value for blockio controller configuration file.
	DefaultBlockIOConfigFile = ""
	// DefaultBlockIOReload is the default value for reloading blockio with changed config file and block devices.
	DefaultBlockIOReload = false
)

const (
	// DefaultIrqBalanceConfigFile is the default irqbalance service configuration file path.
	DefaultIrqBalanceConfigFile = "/etc/sysconfig/irqbalance"
	// DefaultIrqBalanceConfigRestoreFile contains the banned cpu mask configuration to restore. Name due to backward compatibility.
	DefaultIrqBalanceConfigRestoreFile = "/etc/sysconfig/orig_irq_banned_cpus"
)
// This structure is necessary to fake the TOML tables when parsing,
// while also not requiring a bunch of layered structs for no good
// reason.

// RootConfig represents the root of the "crio" TOML config table.
type RootConfig struct {
	// Root is a path to the "root directory" where data not
	// explicitly handled by other options will be stored.
	Root string `toml:"root"`
	// RunRoot is a path to the "run directory" where state information not
	// explicitly handled by other options will be stored.
	RunRoot string `toml:"runroot"`
	// ImageStore if set it will allow end-users to store newly pulled image
	// in path provided by `ImageStore` instead of path provided in `Root`.
	ImageStore string `toml:"imagestore"`
	// Storage is the name of the storage driver which handles actually
	// storing the contents of containers.
	Storage string `toml:"storage_driver"`
	// StorageOption is a list of storage driver specific options.
	StorageOptions []string `toml:"storage_option"`
	// PullOptions is a map of pull options that are passed to the storage driver.
	// Not settable from the TOML config (it has no toml tag).
	pullOptions map[string]string
	// LogDir is the default log directory where all logs will go unless kubelet
	// tells us to put them somewhere else.
	LogDir string `toml:"log_dir"`
	// VersionFile is the location CRI-O will lay down the version file
	// that checks whether we've rebooted.
	VersionFile string `toml:"version_file"`
	// VersionFilePersist is the location CRI-O will lay down the version file
	// that checks whether we've upgraded.
	VersionFilePersist string `toml:"version_file_persist"`
	// CleanShutdownFile is the location CRI-O will lay down the clean shutdown file
	// that checks whether we've had time to sync before shutting down.
	CleanShutdownFile string `toml:"clean_shutdown_file"`
	// InternalWipe is whether CRI-O should wipe containers and images after a reboot when the server starts.
	// If set to false, one must use the external command `crio wipe` to wipe the containers and images in these situations.
	//
	// Deprecated: The option InternalWipe will be removed in a future release.
	InternalWipe bool `toml:"internal_wipe"`
	// InternalRepair is used to repair the affected images.
	InternalRepair bool `toml:"internal_repair"`
}
// GetStore returns the container storage for a given configuration. The
// store options are assembled from the root/run roots, the image store,
// the storage driver with its options, and the pull options.
func (c *RootConfig) GetStore() (storage.Store, error) {
	return storage.GetStore(storage.StoreOptions{
		RunRoot:            c.RunRoot,
		GraphRoot:          c.Root,
		ImageStore:         c.ImageStore,
		GraphDriverName:    c.Storage,
		GraphDriverOptions: c.StorageOptions,
		PullOptions:        c.pullOptions,
	})
}
// runtimeHandlerFeatures represents the supported features of the runtime.
// It embeds the OCI runtime features spec and adds CRI-O internal flags.
type runtimeHandlerFeatures struct {
	RecursiveReadOnlyMounts bool `json:"-"` // Internal use only.
	features.Features
}
// RuntimeHandler represents each item of the "crio.runtime.runtimes" TOML
// config table.
type RuntimeHandler struct {
	// RuntimeConfigPath is the path of the runtime configuration file.
	RuntimeConfigPath string `toml:"runtime_config_path"`
	// RuntimePath is the path of the runtime binary.
	RuntimePath string `toml:"runtime_path"`
	// RuntimeType selects the runtime implementation type
	// (see RuntimeTypeVM and RuntimeTypePod).
	RuntimeType string `toml:"runtime_type"`
	// RuntimeRoot is the root directory used by the runtime.
	RuntimeRoot string `toml:"runtime_root"`
	// PrivilegedWithoutHostDevices can be used to restrict passing host devices
	// to a container running as privileged.
	PrivilegedWithoutHostDevices bool `toml:"privileged_without_host_devices,omitempty"`
	// AllowedAnnotations is a slice of experimental annotations that this runtime handler is allowed to process.
	// The currently recognized values are:
	// "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod.
	// "io.kubernetes.cri-o.Devices" for configuring devices for the pod.
	// "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm.
	// "io.kubernetes.cri-o.UnifiedCgroup.$CTR_NAME" for configuring the cgroup v2 unified block for a container.
	// "io.containers.trace-syscall" for tracing syscalls via the OCI seccomp BPF hook.
	// "io.kubernetes.cri-o.LinkLogs" for linking logs into the pod.
	// "seccomp-profile.kubernetes.cri-o.io" for setting the seccomp profile for:
	//   - a specific container by using: `seccomp-profile.kubernetes.cri-o.io/<CONTAINER_NAME>`
	//   - a whole pod by using: `seccomp-profile.kubernetes.cri-o.io/POD`
	//   Note that the annotation works on containers as well as on images.
	//   For images, the plain annotation `seccomp-profile.kubernetes.cri-o.io`
	//   can be used without the required `/POD` suffix or a container name.
	// "io.kubernetes.cri-o.DisableFIPS" for disabling FIPS mode for a pod within a FIPS-enabled Kubernetes cluster.
	AllowedAnnotations []string `toml:"allowed_annotations,omitempty"`
	// DisallowedAnnotations is the slice of experimental annotations that are not allowed for this handler.
	// It is computed, not read from the config file.
	DisallowedAnnotations []string `toml:"-"`
	// Fields prefixed by Monitor hold the configuration for the monitor for this runtime. At present, the following monitors are supported:
	// oci supports conmon
	// vm does not support any runtime monitor
	MonitorPath   string   `toml:"monitor_path,omitempty"`
	MonitorCgroup string   `toml:"monitor_cgroup,omitempty"`
	MonitorEnv    []string `toml:"monitor_env,omitempty"`
	// MonitorExecCgroup indicates whether to move exec probes to the container's cgroup.
	MonitorExecCgroup string `toml:"monitor_exec_cgroup,omitempty"`
	// PlatformRuntimePaths defines a configuration option that specifies
	// the runtime paths for different platforms.
	PlatformRuntimePaths map[string]string `toml:"platform_runtime_paths,omitempty"`
	// RuntimePullImage marks the runtime as performing image pulling on its
	// own, so crio doesn't have to do it.
	RuntimePullImage bool `toml:"runtime_pull_image,omitempty"`
	// ContainerMinMemory is the minimum memory that must be set for a container.
	ContainerMinMemory string `toml:"container_min_memory,omitempty"`
	// NoSyncLog if enabled will disable fsync on log rotation and container exit.
	// This can improve performance but may result in data loss on hard system crashes.
	NoSyncLog bool `toml:"no_sync_log"`
	// features holds the output of the "features" subcommand.
	// This is populated dynamically and not read from config.
	features runtimeHandlerFeatures
	// InheritDefaultRuntime is an inheritance request: fill in the Runtime
	// information (paths and type) from the default runtime.
	InheritDefaultRuntime bool `toml:"inherit_default_runtime,omitempty"`
	// DefaultAnnotations are annotations specified for the runtime handler
	// if they're not overridden by the pod spec.
	DefaultAnnotations map[string]string `toml:"default_annotations,omitempty"`
	// StreamWebsockets can be used to enable the WebSocket protocol for
	// container exec, attach and port forward.
	//
	// conmon-rs (runtime_type = "pod") supports this configuration for exec
	// and attach. Forwarding ports will be supported in future releases.
	StreamWebsockets bool `toml:"stream_websockets,omitempty"`
	// ExecCPUAffinity specifies which CPU is used when exec-ing the container.
	// The valid values are:
	// "":
	//     Use runtime default.
	// "first":
	//     When it has only exclusive cpuset, use the first CPU in the exclusive cpuset.
	//     When it has both shared and exclusive cpusets, use first CPU in the shared cpuset.
	ExecCPUAffinity ExecCPUAffinityType `toml:"exec_cpu_affinity,omitempty"`
	// SeccompProfile is the absolute path of the seccomp.json profile which is used as the
	// default for the runtime. This configuration takes precedence over runtime config seccomp_profile.
	// If set to "", the runtime config seccomp_profile will be used.
	// If that is also set to "", the internal default seccomp profile will be applied.
	SeccompProfile string `toml:"seccomp_profile,omitempty"`
	// seccompConfig is the seccomp configuration for the handler.
	seccompConfig *seccomp.Config
}
// ExecCPUAffinityType determines which CPU is used when exec-ing a container.
// See RuntimeHandler.ExecCPUAffinity for the accepted values.
type ExecCPUAffinityType string

const (
	// ExecCPUAffinityTypeDefault uses the runtime default.
	ExecCPUAffinityTypeDefault ExecCPUAffinityType = ""
	// ExecCPUAffinityTypeFirst uses the first CPU of the container's cpuset
	// (see the RuntimeHandler.ExecCPUAffinity documentation).
	ExecCPUAffinityTypeFirst ExecCPUAffinityType = "first"
	// runtimeSeccompProfileDefault is the default (unset) per-runtime
	// seccomp profile path.
	runtimeSeccompProfileDefault string = ""
)
// Runtimes maps a runtime handler name to its RuntimeHandler configuration.
type Runtimes map[string]*RuntimeHandler
// RuntimeConfig represents the "crio.runtime" TOML config table.
type RuntimeConfig struct {
// NoPivot instructs the runtime to not use `pivot_root`, but instead use `MS_MOVE`
NoPivot bool `toml:"no_pivot"`
// SELinux determines whether or not SELinux is used for pod separation.
SELinux bool `toml:"selinux"`
// Whether container output should be logged to journald in addition
// to the kubernetes log file
LogToJournald bool `toml:"log_to_journald"`
// DropInfraCtr determines whether the infra container is dropped when appropriate.
DropInfraCtr bool `toml:"drop_infra_ctr"`
// ReadOnly run all pods/containers in read-only mode.
// This mode will mount tmpfs on /run, /tmp and /var/tmp, if those are not mountpoints
// Will also set the readonly flag in the OCI Runtime Spec. In this mode containers
// will only be able to write to volumes mounted into them
ReadOnly bool `toml:"read_only"`
// ConmonEnv is the environment variable list for conmon process.
// This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv.
ConmonEnv []string `toml:"conmon_env"`
// HooksDir holds paths to the directories containing hooks
// configuration files. When the same filename is present in
// multiple directories, the file in the directory listed last in
// this slice takes precedence.
HooksDir []string `toml:"hooks_dir"`
// Capabilities to add to all containers.
DefaultCapabilities capabilities.Capabilities `toml:"default_capabilities"`
// AddInheritableCapabilities can be set to add inheritable capabilities. They were pre-1.23 by default, and were dropped in 1.24.
// This can cause a regression with non-root users not getting capabilities as they previously did.
AddInheritableCapabilities bool `toml:"add_inheritable_capabilities"`
// Additional environment variables to set for all the
// containers. These are overridden if set in the
// container image spec or in the container runtime configuration.
DefaultEnv []string `toml:"default_env"`
// Sysctls to add to all containers.
DefaultSysctls []string `toml:"default_sysctls"`
// DefaultUlimits specifies the default ulimits to apply to containers
DefaultUlimits []string `toml:"default_ulimits"`
// Devices that are allowed to be configured.
AllowedDevices []string `toml:"allowed_devices"`
// Devices to add to containers
AdditionalDevices []string `toml:"additional_devices"`
// CDISpecDirs specifies the directories CRI-O/CDI will scan for CDI Spec files.
CDISpecDirs []string `toml:"cdi_spec_dirs"`
// DeviceOwnershipFromSecurityContext changes the default behavior of setting container devices uid/gid
// from CRI's SecurityContext (RunAsUser/RunAsGroup) instead of taking host's uid/gid. Defaults to false.
DeviceOwnershipFromSecurityContext bool `toml:"device_ownership_from_security_context"`
// DefaultRuntime is the _name_ of the OCI runtime to be used as the default.
// The name is matched against the Runtimes map below.
DefaultRuntime string `toml:"default_runtime"`
// DecryptionKeysPath is the path where keys for image decryption are stored.
DecryptionKeysPath string `toml:"decryption_keys_path"`
// Conmon is the path to conmon binary, used for managing the runtime.
// This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorConfig.Path.
Conmon string `toml:"conmon"`
// ConmonCgroup is the cgroup setting used for conmon.
// This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorConfig.Cgroup.
ConmonCgroup string `toml:"conmon_cgroup"`
// SeccompProfile is the seccomp.json profile path which is used as the
// default for the runtime.
// If set to "" or not found, the internal default seccomp profile will be used.
SeccompProfile string `toml:"seccomp_profile"`
// PrivilegedSeccompProfile can be set to enable a seccomp profile for
// privileged containers from the local path.
PrivilegedSeccompProfile string `toml:"privileged_seccomp_profile"`
// ApparmorProfile is the apparmor profile name which is used as the
// default for the runtime.
ApparmorProfile string `toml:"apparmor_profile"`
// BlockIOConfigFile is the path to the blockio class configuration
// file for configuring the cgroup blockio controller.
BlockIOConfigFile string `toml:"blockio_config_file"`
// BlockIOReload instructs the runtime to reload blockio configuration
// rescan block devices in the system before assigning blockio parameters.
BlockIOReload bool `toml:"blockio_reload"`
// IrqBalanceConfigFile is the irqbalance service config file which is used
// for configuring irqbalance daemon.
IrqBalanceConfigFile string `toml:"irqbalance_config_file"`
// RdtConfigFile is the RDT config file used for configuring resctrl fs
RdtConfigFile string `toml:"rdt_config_file"`
// CgroupManagerName is the manager implementation name which is used to
// handle cgroups for containers.
CgroupManagerName string `toml:"cgroup_manager"`
// DefaultMountsFile is the file path for the default mounts to be mounted for the container
// Note, for testing purposes mainly
DefaultMountsFile string `toml:"default_mounts_file"`
// ContainerExitsDir is the directory in which container exit files are
// written to by conmon.
ContainerExitsDir string `toml:"container_exits_dir"`
// ContainerAttachSocketDir is the location for container attach sockets.
ContainerAttachSocketDir string `toml:"container_attach_socket_dir"`
// BindMountPrefix is the prefix to use for the source of the bind mounts.
BindMountPrefix string `toml:"bind_mount_prefix"`
// UIDMappings specifies the UID mappings to have in the user namespace.
// A range is specified in the form containerUID:HostUID:Size. Multiple
// ranges are separated by comma.
UIDMappings string `toml:"uid_mappings"`
// MinimumMappableUID specifies the minimum UID value which can be
// specified in a uid_mappings value, whether configured here or sent
// to us via CRI, for a pod that isn't to be run as UID 0.
MinimumMappableUID int64 `toml:"minimum_mappable_uid"`
// GIDMappings specifies the GID mappings to have in the user namespace.
// A range is specified in the form containerUID:HostUID:Size. Multiple
// ranges are separated by comma.
GIDMappings string `toml:"gid_mappings"`
// MinimumMappableGID specifies the minimum GID value which can be
// specified in a gid_mappings value, whether configured here or sent
// to us via CRI, for a pod that isn't to be run as UID 0.
MinimumMappableGID int64 `toml:"minimum_mappable_gid"`
// LogLevel determines the verbosity of the logs based on the level it is set to.
// Options are fatal, panic, error (default), warn, info, debug, and trace.
LogLevel string `toml:"log_level"`
// LogFilter specifies a regular expression to filter the log messages
LogFilter string `toml:"log_filter"`
// NamespacesDir is the directory where the state of the managed namespaces
// gets tracked
NamespacesDir string `toml:"namespaces_dir"`
// PinNSPath is the path to find the pinns binary, which is needed
// to manage namespace lifecycle
PinnsPath string `toml:"pinns_path"`
// CriuPath is the path to find the criu binary, which is needed
// to checkpoint and restore containers
EnableCriuSupport bool `toml:"enable_criu_support"`
// Runtimes defines a list of OCI compatible runtimes. The runtime to
// use is picked based on the runtime_handler provided by the CRI. If
// no runtime_handler is provided, the runtime will be picked based on
// the level of trust of the workload.
Runtimes Runtimes `toml:"runtimes"`
// Workloads defines a list of workloads types that are have grouped settings
// that will be applied to containers.
Workloads Workloads `toml:"workloads"`
// PidsLimit is the number of processes each container is restricted to
// by the cgroup process number controller.
PidsLimit int64 `toml:"pids_limit"`
// LogSizeMax is the maximum number of bytes after which the log file
// will be truncated. It can be expressed as a human-friendly string
// that is parsed to bytes.
// Negative values indicate that the log file won't be truncated.
LogSizeMax int64 `toml:"log_size_max"`
// CtrStopTimeout specifies the time to wait before to generate an
// error because the container state is still tagged as "running".
CtrStopTimeout int64 `toml:"ctr_stop_timeout"`
// SeparatePullCgroup specifies whether an image pull must be performed in a separate cgroup
SeparatePullCgroup string `toml:"separate_pull_cgroup"`
// InfraCtrCPUSet is the CPUs set that will be used to run infra containers
InfraCtrCPUSet string `toml:"infra_ctr_cpuset"`
// SharedCPUSet is the CPUs set that will be used for guaranteed containers that
// want access to shared cpus.
SharedCPUSet string `toml:"shared_cpuset"`
// AbsentMountSourcesToReject is a list of paths that, when absent from the host,
// will cause a container creation to fail (as opposed to the current behavior of creating a directory).
AbsentMountSourcesToReject []string `toml:"absent_mount_sources_to_reject"`
// EnablePodEvents specifies if the container pod-level events should be generated to optimize the PLEG at Kubelet.
EnablePodEvents bool `toml:"enable_pod_events"`
// IrqBalanceConfigRestoreFile is the irqbalance service banned CPU list to restore.
// If empty, no restoration attempt will be done.
IrqBalanceConfigRestoreFile string `toml:"irqbalance_config_restore_file"`
// seccompConfig is the internal seccomp configuration
seccompConfig *seccomp.Config
// apparmorConfig is the internal AppArmor configuration
apparmorConfig *apparmor.Config
// blockioConfig is the internal blockio configuration
blockioConfig *blockio.Config
// rdtConfig is the internal Rdt configuration
rdtConfig *rdt.Config
// ulimitConfig is the internal ulimit configuration
ulimitsConfig *ulimits.Config
// deviceConfig is the internal additional devices configuration
deviceConfig *device.Config
// cgroupManager is the internal CgroupManager configuration
cgroupManager cgmgr.CgroupManager
// conmonManager is the internal ConmonManager configuration
conmonManager *conmonmgr.ConmonManager
// namespaceManager is the internal NamespaceManager configuration
namespaceManager *nsmgr.NamespaceManager
// Whether SELinux should be disabled within a pod,
// when it is running in the host network namespace
// https://github.com/cri-o/cri-o/issues/5501
HostNetworkDisableSELinux bool `toml:"hostnetwork_disable_selinux"`
// Option to disable hostport mapping in CRI-O
// Default value is 'false'
DisableHostPortMapping bool `toml:"disable_hostport_mapping"`
// Option to set the timezone inside the container.
// Use 'Local' to match the timezone of the host machine.
Timezone string `toml:"timezone"`
}
// ImageConfig represents the "crio.image" TOML config table.
type ImageConfig struct {
	// DefaultTransport is a value we prefix to image names that fail to
	// validate source references.
	DefaultTransport string `toml:"default_transport"`
	// GlobalAuthFile is a path to a file like /var/lib/kubelet/config.json
	// containing credentials necessary for pulling images from secure
	// registries.
	GlobalAuthFile string `toml:"global_auth_file"`
	// NamespacedAuthDir is the root path for pod namespace-separated
	// auth files, which is intended to be used together with CRI-O's credential provider:
	// https://github.com/cri-o/crio-credential-provider
	// The namespaced auth file will be <NAMESPACED_AUTH_DIR>/<NAMESPACE>-<IMAGE_NAME_SHA256>.json,
	// where CRI-O moves them into a dedicated location to mark them as "used" during image pull:
	// <NAMESPACED_AUTH_DIR>/in-use/<NAMESPACE>-<IMAGE_NAME_SHA256>-<UUID>.json
	// Note that image name provided to the credential provider does not
	// contain any specific tag or digest, only the normalized repository
	// as well as the image name, which can cause races if the same image
	// prefix gets pulled on a single node.
	// This temporary auth file will be used instead of any configured GlobalAuthFile.
	// If no pod namespace is being provided on image pull (via the sandbox
	// config), or the concatenated path is non existent, then the system wide
	// auth file will be used as fallback.
	// Must be an absolute path.
	NamespacedAuthDir string `toml:"namespaced_auth_dir"`
	// PauseImage is the name of an image on a registry which we use to instantiate infra
	// containers. It should start with a registry host name.
	// Format is enforced by validation.
	PauseImage string `toml:"pause_image"`
	// PauseImageAuthFile, if not empty, is a path to a file like
	// /var/lib/kubelet/config.json containing credentials necessary
	// for pulling PauseImage
	PauseImageAuthFile string `toml:"pause_image_auth_file"`
	// PauseCommand is the path of the binary we run in an infra
	// container that's been instantiated using PauseImage.
	PauseCommand string `toml:"pause_command"`
	// PinnedImages is a list of container images that should be pinned
	// and not subject to garbage collection by kubelet.
	// Pinned images will remain in the container runtime's storage until
	// they are manually removed. Default value: empty list (no images pinned)
	PinnedImages []string `toml:"pinned_images"`
	// SignaturePolicyPath is the name of the file which decides what sort
	// of policy we use when deciding whether or not to trust an image that
	// we've pulled. Outside of testing situations, it is strongly advised
	// that this be left unspecified so that the default system-wide policy
	// will be used.
	SignaturePolicyPath string `toml:"signature_policy"`
	// SignaturePolicyDir is the root path for pod namespace-separated
	// signature policies. The final policy to be used on image pull will be
	// <SIGNATURE_POLICY_DIR>/<NAMESPACE>.json.
	// If no pod namespace is being provided on image pull (via the sandbox
	// config), or the concatenated path is non existent, then the
	// SignaturePolicyPath or system wide policy will be used as fallback.
	// Must be an absolute path.
	SignaturePolicyDir string `toml:"signature_policy_dir"`
	// InsecureRegistries is a list of registries that must be contacted w/o
	// TLS verification.
	// Deprecated: it's no longer effective. Please use `insecure` in `registries.conf` instead.
	InsecureRegistries []string `toml:"insecure_registries"`
	// ImageVolumes controls how volumes specified in image config are handled
	ImageVolumes ImageVolumesType `toml:"image_volumes"`
	// BigFilesTemporaryDir is the temporary directory used for big files,
	// propagated into the containers/image SystemContext (see SetSystemContext).
	BigFilesTemporaryDir string `toml:"big_files_temporary_dir"`
	// AutoReloadRegistries if set to true, will automatically
	// reload the mirror registry when there is an update to the
	// 'registries.conf.d' directory.
	AutoReloadRegistries bool `toml:"auto_reload_registries"`
	// PullProgressTimeout is the timeout for an image pull to make progress
	// until the pull operation gets canceled. This value will be also used for
	// calculating the pull progress interval to pullProgressTimeout / 10.
	// Can be set to 0 to disable the timeout as well as the progress output.
	PullProgressTimeout time.Duration `toml:"pull_progress_timeout"`
	// OCIArtifactMountSupport is used to determine if CRI-O should support OCI Artifacts.
	OCIArtifactMountSupport bool `toml:"oci_artifact_mount_support"`
	// ShortNameMode describes the mode of short name resolution.
	// The valid values are "enforcing" and "disabled".
	// If "enforcing", an image pull will fail if a short name is used, but the results are ambiguous.
	// If "disabled", the first result will be chosen.
	ShortNameMode string `toml:"short_name_mode"`
}
// NetworkConfig represents the "crio.network" TOML config table.
type NetworkConfig struct {
	// CNIDefaultNetwork is the default CNI network name to be selected
	CNIDefaultNetwork string `toml:"cni_default_network"`
	// NetworkDir is where CNI network configuration files are stored.
	NetworkDir string `toml:"network_dir"`
	// PluginDir is where CNI plugin binaries are stored.
	// NOTE(review): appears superseded by PluginDirs (note the omitempty
	// tag) — confirm deprecation status before relying on it.
	PluginDir string `toml:"plugin_dir,omitempty"`
	// PluginDirs is where CNI plugin binaries are stored.
	PluginDirs []string `toml:"plugin_dirs"`
	// cniManager manages the internal ocicni plugin
	cniManager *cnimgr.CNIManager
}
// APIConfig represents the "crio.api" TOML config table.
type APIConfig struct {
	// GRPCMaxSendMsgSize is the maximum grpc send message size in bytes.
	GRPCMaxSendMsgSize int `toml:"grpc_max_send_msg_size"`
	// GRPCMaxRecvMsgSize is the maximum grpc receive message size in bytes.
	GRPCMaxRecvMsgSize int `toml:"grpc_max_recv_msg_size"`
	// Listen is the path to the AF_LOCAL socket on which cri-o will listen.
	// This may support proto://addr formats later, but currently this is just
	// a path.
	Listen string `toml:"listen"`
	// StreamAddress is the IP address on which the stream server will listen.
	StreamAddress string `toml:"stream_address"`
	// StreamPort is the port on which the stream server will listen.
	StreamPort string `toml:"stream_port"`
	// StreamEnableTLS enables encrypted tls transport of the stream server.
	// When enabled, StreamTLSCert and StreamTLSKey must both be set (see
	// APIConfig.Validate).
	StreamEnableTLS bool `toml:"stream_enable_tls"`
	// StreamTLSCert is the x509 certificate file path used to serve the encrypted stream
	StreamTLSCert string `toml:"stream_tls_cert"`
	// StreamTLSKey is the key file path used to serve the encrypted stream
	StreamTLSKey string `toml:"stream_tls_key"`
	// StreamTLSCA is the x509 CA(s) file used to verify and authenticate client
	// communication with the tls encrypted stream
	StreamTLSCA string `toml:"stream_tls_ca"`
	// StreamIdleTimeout is how long to leave idle connections open for.
	// NOTE(review): presumably parsed as a Go duration string (e.g. "30s")
	// by the stream server — confirm at the consumer.
	StreamIdleTimeout string `toml:"stream_idle_timeout"`
}
// MetricsConfig specifies all necessary configuration for Prometheus based
// metrics retrieval.
type MetricsConfig struct {
	// EnableMetrics can be used to globally enable or disable metrics support
	EnableMetrics bool `toml:"enable_metrics"`
	// MetricsCollectors specifies enabled metrics collectors.
	MetricsCollectors collectors.Collectors `toml:"metrics_collectors"`
	// MetricsHost is the IP address or hostname on which the metrics server will listen.
	MetricsHost string `toml:"metrics_host"`
	// MetricsPort is the port on which the metrics server will listen.
	MetricsPort int `toml:"metrics_port"`
	// MetricsSocket is the local socket path to bind the metrics server to.
	MetricsSocket string `toml:"metrics_socket"`
	// MetricsCert is the certificate for the secure metrics server.
	MetricsCert string `toml:"metrics_cert"`
	// MetricsKey is the certificate key for the secure metrics server.
	MetricsKey string `toml:"metrics_key"`
}
// TracingConfig specifies all necessary configuration for opentelemetry trace exports.
type TracingConfig struct {
	// EnableTracing can be used to globally enable or disable tracing support
	EnableTracing bool `toml:"enable_tracing"`
	// TracingEndpoint is the address on which the grpc tracing collector server will listen.
	TracingEndpoint string `toml:"tracing_endpoint"`
	// TracingSamplingRatePerMillion is the number of samples to collect per million spans. Set to 1000000 to always sample.
	// Defaults to 0.
	TracingSamplingRatePerMillion int `toml:"tracing_sampling_rate_per_million"`
}
// StatsConfig specifies all necessary configuration for reporting container/pod stats
// and pod sandbox metrics.
type StatsConfig struct {
	// StatsCollectionPeriod is the number of seconds between collecting pod and container stats.
	// If set to 0, the stats are collected on-demand instead.
	// NOTE(review): looks superseded by CollectionPeriod below — confirm
	// which field takes precedence when both are set.
	StatsCollectionPeriod int `toml:"stats_collection_period"`
	// CollectionPeriod is the number of seconds between collecting pod/container stats
	// and pod sandbox metrics. If set to 0, the metrics/stats are collected on-demand instead.
	CollectionPeriod int `toml:"collection_period"`
	// IncludedPodMetrics specifies the list of metrics to include when collecting pod metrics.
	// If empty, all available metrics will be collected.
	IncludedPodMetrics []string `toml:"included_pod_metrics"`
}
// tomlConfig is another way of looking at a Config, which is
// TOML-friendly (it has all of the explicit tables). It's just used for
// conversions.
type tomlConfig struct {
	Crio struct {
		RootConfig
		// Each anonymous wrapper struct maps one embedded config type to
		// its named TOML sub-table under [crio].
		API     struct{ APIConfig }     `toml:"api"`
		Runtime struct{ RuntimeConfig } `toml:"runtime"`
		Image   struct{ ImageConfig }   `toml:"image"`
		Network struct{ NetworkConfig } `toml:"network"`
		Metrics struct{ MetricsConfig } `toml:"metrics"`
		Tracing struct{ TracingConfig } `toml:"tracing"`
		Stats   struct{ StatsConfig }   `toml:"stats"`
		NRI     struct{ *nri.Config }   `toml:"nri"`
	} `toml:"crio"`
}
// SetSystemContext configures the SystemContext used by containers/image library.
func (t *tomlConfig) SetSystemContext(c *Config) {
	c.SystemContext.BigFilesTemporaryDir = c.BigFilesTemporaryDir

	// Short name resolution is enforced unless explicitly disabled.
	mode := types.ShortNameModeEnforcing
	if c.ShortNameMode == "disabled" {
		mode = types.ShortNameModeDisabled
	}

	c.SystemContext.ShortNameMode = ptr.To(mode)
}
// toConfig copies the decoded TOML tables back into the given Config and
// refreshes its SystemContext from the freshly assigned fields.
func (t *tomlConfig) toConfig(c *Config) {
	c.Comment = "# "
	c.RootConfig = t.Crio.RootConfig
	c.APIConfig = t.Crio.API.APIConfig
	c.RuntimeConfig = t.Crio.Runtime.RuntimeConfig
	c.ImageConfig = t.Crio.Image.ImageConfig
	c.NetworkConfig = t.Crio.Network.NetworkConfig
	c.MetricsConfig = t.Crio.Metrics.MetricsConfig
	c.TracingConfig = t.Crio.Tracing.TracingConfig
	c.StatsConfig = t.Crio.Stats.StatsConfig
	c.NRI = t.Crio.NRI.Config
	// Must run after the assignments above since it reads the copied fields.
	t.SetSystemContext(c)
}
// fromConfig seeds the TOML representation from the given Config, so a
// subsequent decode only overrides values present in the parsed file.
func (t *tomlConfig) fromConfig(c *Config) {
	t.Crio.RootConfig = c.RootConfig
	t.Crio.API.APIConfig = c.APIConfig
	t.Crio.Runtime.RuntimeConfig = c.RuntimeConfig
	t.Crio.Image.ImageConfig = c.ImageConfig
	t.Crio.Network.NetworkConfig = c.NetworkConfig
	t.Crio.Metrics.MetricsConfig = c.MetricsConfig
	t.Crio.Tracing.TracingConfig = c.TracingConfig
	t.Crio.Stats.StatsConfig = c.StatsConfig
	t.Crio.NRI.Config = c.NRI
}
const configLogPrefix = "Updating config from "
// UpdateFromFile populates the Config from the TOML-encoded file at the given
// path and "remembers" that we should reload this file's contents when we
// receive a SIGHUP.
// Returns errors encountered when reading or parsing the files, or nil
// otherwise.
func (c *Config) UpdateFromFile(ctx context.Context, path string) error {
	log.Infof(ctx, configLogPrefix+"single file: %s", path)

	err := c.UpdateFromDropInFile(ctx, path)
	if err != nil {
		return fmt.Errorf("update config from drop-in file: %w", err)
	}

	// Remember the file so it can be re-read on SIGHUP.
	c.singleConfigPath = path

	return nil
}
// UpdateFromDropInFile populates the Config from the TOML-encoded file at the
// given path. The file may be the main configuration file, or it can be one
// of the drop-in files which are used to supplement it.
// Returns errors encountered when reading or parsing the files, or nil
// otherwise.
func (c *Config) UpdateFromDropInFile(ctx context.Context, path string) error {
	log.Infof(ctx, configLogPrefix+"drop-in file: %s", path)
	// keeps the storage options from storage.conf and merge it to crio config
	var storageOpts []string
	storageOpts = append(storageOpts, c.StorageOptions...)
	// storage configurations from storage.conf, if crio config has no values for these, they will be merged to crio config
	graphRoot := c.Root
	runRoot := c.RunRoot
	storageDriver := c.Storage
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	// Seed the TOML struct with the current configuration so that values
	// absent from the file keep their existing settings.
	t := new(tomlConfig)
	t.fromConfig(c)
	_, err = toml.Decode(string(data), t)
	if err != nil {
		return fmt.Errorf("unable to decode configuration %v: %w", path, err)
	}
	// Merge the pre-existing storage options with the newly decoded ones,
	// keeping only the last occurrence of each duplicated option.
	storageOpts = append(storageOpts, t.Crio.StorageOptions...)
	storageOpts = removeDupStorageOpts(storageOpts)
	t.Crio.StorageOptions = storageOpts
	// inherits storage configurations from storage.conf
	if t.Crio.Root == "" {
		t.Crio.Root = graphRoot
	}
	if t.Crio.RunRoot == "" {
		t.Crio.RunRoot = runRoot
	}
	if t.Crio.Storage == "" {
		t.Crio.Storage = storageDriver
	}
	// Copy the merged result back into the live Config.
	t.toConfig(c)
	return nil
}
// removeDupStorageOpts removes duplicated storage options from the list,
// keeping only the last appearance of each option and preserving the
// relative order of the kept entries.
func removeDupStorageOpts(storageOpts []string) []string {
	// Record the index of the final occurrence of every option.
	lastIdx := make(map[string]int, len(storageOpts))
	for i, opt := range storageOpts {
		lastIdx[opt] = i
	}

	var deduped []string

	for i, opt := range storageOpts {
		// Keep an entry only where it appears for the last time.
		if lastIdx[opt] == i {
			deduped = append(deduped, opt)
		}
	}

	return deduped
}
// UpdateFromPath recursively iterates the provided path and updates the
// configuration for it.
func (c *Config) UpdateFromPath(ctx context.Context, path string) error {
	log.Infof(ctx, configLogPrefix+"path: %s", path)

	// A missing directory is not an error; there is simply nothing to load.
	if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
		return nil
	}

	walkFn := func(p string, info os.FileInfo, err error) error {
		switch {
		case err != nil:
			return err
		case info.IsDir():
			return nil
		default:
			return c.UpdateFromDropInFile(ctx, p)
		}
	}

	if err := filepath.Walk(path, walkFn); err != nil {
		return fmt.Errorf("walk path: %w", err)
	}

	// Remember the directory so it can be re-read on SIGHUP.
	c.dropInConfigDir = path

	return nil
}
// ToFile outputs the given Config as a TOML-encoded file at the given path.
// Returns errors encountered when generating or writing the file, or nil
// otherwise.
func (c *Config) ToFile(path string) error {
	data, err := c.ToBytes()
	if err != nil {
		return err
	}

	// 0o644: configuration files are world-readable.
	return os.WriteFile(path, data, 0o644)
}
// ToString encodes the config into a string value.
func (c *Config) ToString() (string, error) {
	b, err := c.ToBytes()
	if err != nil {
		return "", err
	}

	return string(b), nil
}
// ToBytes encodes the config into a byte slice. It errors if the encoding
// fails, which should never happen at all because of general type safeness.
func (c *Config) ToBytes() ([]byte, error) {
	var tc tomlConfig
	tc.fromConfig(c)

	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).Encode(tc); err != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}
// DefaultConfig returns the default configuration for crio.
// It derives storage paths from the system-wide containers/storage defaults
// and fills every config table with its built-in defaults.
func DefaultConfig() (*Config, error) {
	storeOpts, err := storage.DefaultStoreOptions()
	if err != nil {
		return nil, err
	}
	cgroupManager := cgmgr.New()
	ua, err := useragent.Get()
	if err != nil {
		return nil, fmt.Errorf("get user agent: %w", err)
	}
	return &Config{
		Comment: "# ",
		SystemContext: &types.SystemContext{
			DockerRegistryUserAgent: ua,
		},
		// Storage locations are inherited from containers/storage defaults.
		RootConfig: RootConfig{
			Root:              storeOpts.GraphRoot,
			RunRoot:           storeOpts.RunRoot,
			ImageStore:        storeOpts.ImageStore,
			Storage:           storeOpts.GraphDriverName,
			StorageOptions:    storeOpts.GraphDriverOptions,
			pullOptions:       storeOpts.PullOptions,
			LogDir:            "/var/log/crio/pods",
			VersionFile:       CrioVersionPathTmp,
			CleanShutdownFile: CrioCleanShutdownFile,
			InternalWipe:      true,
			InternalRepair:    true,
		},
		APIConfig: APIConfig{
			Listen:             CrioSocketPath,
			StreamAddress:      "127.0.0.1",
			// "0" lets the stream server pick a free port.
			StreamPort:         "0",
			GRPCMaxSendMsgSize: defaultGRPCMaxMsgSize,
			GRPCMaxRecvMsgSize: defaultGRPCMaxMsgSize,
		},
		RuntimeConfig: *DefaultRuntimeConfig(cgroupManager),
		ImageConfig: ImageConfig{
			DefaultTransport:        "docker://",
			PauseImage:              DefaultPauseImage,
			PauseCommand:            "/pause",
			ImageVolumes:            ImageVolumesMkdir,
			SignaturePolicyDir:      "/etc/crio/policies",
			// 0 disables the pull progress timeout and output.
			PullProgressTimeout:     0,
			OCIArtifactMountSupport: true,
			ShortNameMode:           "enforcing",
			NamespacedAuthDir:       cpConfig.AuthDir,
		},
		NetworkConfig: NetworkConfig{
			NetworkDir: cniConfigDir,
			PluginDirs: []string{cniBinDir},
		},
		MetricsConfig: MetricsConfig{
			MetricsHost:       "127.0.0.1",
			MetricsPort:       9090,
			MetricsCollectors: collectors.All(),
		},
		TracingConfig: TracingConfig{
			TracingEndpoint:               "127.0.0.1:4317",
			TracingSamplingRatePerMillion: 0,
			EnableTracing:                 false,
		},
		NRI: nri.New(),
	}, nil
}
// DefaultRuntimeConfig returns the default Runtime configs.
// The provided cgroup manager determines both the configured manager name
// and whether the default runtime handler uses a systemd monitor cgroup.
func DefaultRuntimeConfig(cgroupManager cgmgr.CgroupManager) *RuntimeConfig {
	return &RuntimeConfig{
		AllowedDevices:     []string{"/dev/fuse", "/dev/net/tun"},
		DecryptionKeysPath: "/etc/crio/keys/",
		DefaultRuntime:     DefaultRuntime,
		Runtimes: Runtimes{
			DefaultRuntime: defaultRuntimeHandler(cgroupManager.IsSystemd()),
		},
		SELinux:                  selinuxEnabled(),
		ApparmorProfile:          apparmor.DefaultProfile,
		BlockIOConfigFile:        DefaultBlockIOConfigFile,
		BlockIOReload:            DefaultBlockIOReload,
		IrqBalanceConfigFile:     DefaultIrqBalanceConfigFile,
		RdtConfigFile:            rdt.DefaultRdtConfigFile,
		CgroupManagerName:        cgroupManager.Name(),
		PidsLimit:                DefaultPidsLimit,
		ContainerExitsDir:        containerExitsDir,
		ContainerAttachSocketDir: ContainerAttachSocketDir,
		// -1 disables the minimum-mappable ID restriction.
		MinimumMappableUID:          -1,
		MinimumMappableGID:          -1,
		LogSizeMax:                  DefaultLogSizeMax,
		CtrStopTimeout:              defaultCtrStopTimeout,
		DefaultCapabilities:         capabilities.Default(),
		LogLevel:                    "info",
		HooksDir:                    []string{hooks.DefaultDir},
		CDISpecDirs:                 cdi.DefaultSpecDirs,
		NamespacesDir:               defaultNamespacesDir,
		DropInfraCtr:                true,
		IrqBalanceConfigRestoreFile: DefaultIrqBalanceConfigRestoreFile,
		// Internal sub-configurations start from their own defaults.
		seccompConfig:    seccomp.New(),
		apparmorConfig:   apparmor.New(),
		blockioConfig:    blockio.New(),
		cgroupManager:    cgroupManager,
		deviceConfig:     device.New(),
		namespaceManager: nsmgr.New(defaultNamespacesDir, ""),
		rdtConfig:        rdt.New(),
		ulimitsConfig:    ulimits.New(),
		HostNetworkDisableSELinux: true,
		DisableHostPortMapping:    false,
		EnableCriuSupport:         true,
	}
}
// Validate is the main entry point for library configuration validation.
// The parameter `onExecution` specifies if the validation should include
// execution checks. It returns an `error` on validation failure, otherwise
// `nil`.
func (c *Config) Validate(onExecution bool) error {
	// Reject unknown image volume handling modes up front.
	switch c.ImageVolumes {
	case ImageVolumesMkdir:
	case ImageVolumesIgnore:
	case ImageVolumesBind:
	default:
		return errors.New("unrecognized image volume type specified")
	}
	if onExecution {
		if err := node.ValidateConfig(); err != nil {
			return err
		}
	}
	if err := c.RootConfig.Validate(onExecution); err != nil {
		return fmt.Errorf("validating root config: %w", err)
	}
	if err := c.RuntimeConfig.Validate(c.SystemContext, onExecution); err != nil {
		return fmt.Errorf("validating runtime config: %w", err)
	}
	// Place the seccomp notifier socket next to the CRI-O listen socket,
	// both for the global config and for every per-runtime override.
	c.seccompConfig.SetNotifierPath(
		filepath.Join(filepath.Dir(c.Listen), "seccomp"),
	)
	for name := range c.Runtimes {
		if c.Runtimes[name].seccompConfig != nil {
			c.Runtimes[name].seccompConfig.SetNotifierPath(
				filepath.Join(filepath.Dir(c.Listen), "seccomp"),
			)
		}
	}
	if err := c.ImageConfig.Validate(onExecution); err != nil {
		return fmt.Errorf("validating image config: %w", err)
	}
	if err := c.NetworkConfig.Validate(onExecution); err != nil {
		return fmt.Errorf("validating network config: %w", err)
	}
	if err := c.APIConfig.Validate(onExecution); err != nil {
		return fmt.Errorf("validating api config: %w", err)
	}
	if !c.SELinux {
		selinux.SetDisabled()
	}
	if err := c.NRI.Validate(onExecution); err != nil {
		return fmt.Errorf("validating NRI config: %w", err)
	}
	return nil
}
// Validate is the main entry point for API configuration validation.
// The parameter `onExecution` specifies if the validation should include
// execution checks. It returns an `error` on validation failure, otherwise
// `nil`.
func (c *APIConfig) Validate(onExecution bool) error {
	// Non-positive gRPC message sizes fall back to the default.
	if c.GRPCMaxSendMsgSize <= 0 {
		c.GRPCMaxSendMsgSize = defaultGRPCMaxMsgSize
	}

	if c.GRPCMaxRecvMsgSize <= 0 {
		c.GRPCMaxRecvMsgSize = defaultGRPCMaxMsgSize
	}

	// A TLS-enabled stream server requires both a certificate and a key.
	if c.StreamEnableTLS {
		switch {
		case c.StreamTLSCert == "":
			return errors.New("stream TLS cert path is empty")
		case c.StreamTLSKey == "":
			return errors.New("stream TLS key path is empty")
		}
	}

	if !onExecution {
		return nil
	}

	return RemoveUnusedSocket(c.Listen)
}
// RemoveUnusedSocket first ensures that the path to the socket exists and
// removes unused socket connections if available.
func RemoveUnusedSocket(path string) error {
	// Make sure the parent directory of the socket exists.
	if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
		return fmt.Errorf("creating socket directories: %w", err)
	}

	// Nothing to clean up if the socket does not exist yet.
	if _, err := os.Stat(path); err != nil {
		return nil
	}

	// Refuse to remove a socket another process is still listening on.
	if _, err := net.DialTimeout("unix", path, 0); err == nil {
		return fmt.Errorf("already existing connection on %s", path)
	}

	if err := os.Remove(path); err != nil {
		return fmt.Errorf("removing %s: %w", path, err)
	}

	return nil
}
// Validate is the main entry point for root configuration validation.
// The parameter `onExecution` specifies if the validation should include
// execution checks. It returns an `error` on validation failure, otherwise
// `nil`.
func (c *RootConfig) Validate(onExecution bool) error {
	if onExecution {
		if !filepath.IsAbs(c.LogDir) {
			return errors.New("log_dir is not an absolute path")
		}
		if err := os.MkdirAll(c.LogDir, 0o700); err != nil {
			return fmt.Errorf("invalid log_dir: %w", err)
		}
		store, err := c.GetStore()
		if err != nil {
			return fmt.Errorf("failed to get store to set defaults: %w", err)
		}
		// This step merges the /etc/container/storage.conf with the
		// storage configuration in crio.conf
		// If we don't do this step, we risk returning the incorrect info
		// on Inspect (/info) requests
		c.RunRoot = store.RunRoot()
		c.Root = store.GraphRoot()
		c.Storage = store.GraphDriverName()
		c.StorageOptions = store.GraphOptions()
		c.pullOptions = store.PullOptions()
	}
	return nil
}
// CleanShutdownSupportedFileName returns the path of the sentinel file
// derived from the configured CleanShutdownFile by appending ".supported".
func (c *RootConfig) CleanShutdownSupportedFileName() string {
	const supportedSuffix = ".supported"

	return c.CleanShutdownFile + supportedSuffix
}
// Validate is the main entry point for runtime configuration validation
// The parameter `onExecution` specifies if the validation should include
// execution checks. It returns an `error` on validation failure, otherwise
// `nil`.
func (c *RuntimeConfig) Validate(systemContext *types.SystemContext, onExecution bool) error {
	if err := c.ulimitsConfig.LoadUlimits(c.DefaultUlimits); err != nil {
		return err
	}
	if err := c.deviceConfig.LoadDevices(c.AdditionalDevices); err != nil {
		return err
	}
	if err := c.ValidateDefaultRuntime(); err != nil {
		return err
	}
	// "Local" is always valid; any other value must parse as a tz database name.
	if c.Timezone != "" && !strings.EqualFold(c.Timezone, "local") {
		_, err := time.LoadLocation(c.Timezone)
		if err != nil {
			return fmt.Errorf("invalid timezone: %s", c.Timezone)
		}
	}
	// Non-negative log size limits below one OCI buffer are unusable.
	if c.LogSizeMax >= 0 && c.LogSizeMax < OCIBufSize {
		return fmt.Errorf("log size max should be negative or >= %d", OCIBufSize)
	}
	// We need to ensure the container termination will be properly waited
	// for by defining a minimal timeout value. This will prevent timeout
	// value defined in the configuration file to be too low.
	if c.CtrStopTimeout < defaultCtrStopTimeout {
		c.CtrStopTimeout = defaultCtrStopTimeout
		logrus.Warnf("Forcing ctr_stop_timeout to lowest possible value of %ds", c.CtrStopTimeout)
	}
	if _, err := c.Sysctls(); err != nil {
		return fmt.Errorf("invalid default_sysctls: %w", err)
	}
	if err := c.DefaultCapabilities.Validate(); err != nil {
		return fmt.Errorf("invalid capabilities: %w", err)
	}
	if c.InfraCtrCPUSet != "" {
		set, err := cpuset.Parse(c.InfraCtrCPUSet)
		if err != nil {
			return fmt.Errorf("invalid infra_ctr_cpuset: %w", err)
		}
		executable, err := exec.LookPath(tasksetBinary)
		if err != nil {
			return fmt.Errorf("%q not found in $PATH: %w", tasksetBinary, err)
		}
		// Pin all spawned commands to the infra container CPU set.
		cmdrunner.PrependCommandsWith(executable, "--cpu-list", set.String())
	}
	if err := c.Workloads.Validate(); err != nil {
		return fmt.Errorf("workloads validation: %w", err)
	}
	// check for validation on execution
	if onExecution {
		// First, configure cgroup manager so the values of the Runtime.MonitorCgroup can be validated
		cgroupManager, err := cgmgr.SetCgroupManager(c.CgroupManagerName)
		if err != nil {
			return fmt.Errorf("unable to update cgroup manager: %w", err)
		}
		c.cgroupManager = cgroupManager
		if err := c.ValidateRuntimes(); err != nil {
			return fmt.Errorf("runtime validation: %w", err)
		}
		// Validate the system registries configuration
		if _, err := sysregistriesv2.GetRegistries(systemContext); err != nil {
			return fmt.Errorf("invalid registries: %w", err)
		}
		// we should use a hooks directory if
		// it exists and is a directory
		// it does not exist but can be created
		// otherwise, we skip
		hooksDirs := []string{}
		for _, hooksDir := range c.HooksDir {
			if err := utils.IsDirectory(hooksDir); err != nil {
				if !os.IsNotExist(err) {
					logrus.Warnf("Skipping invalid hooks directory: %s exists but is not a directory", hooksDir)
					continue
				}
				if err := os.MkdirAll(hooksDir, 0o755); err != nil {
					logrus.Debugf("Failed to create requested hooks dir: %v", err)
					continue
				}
			}
			logrus.Debugf("Using hooks directory: %s", hooksDir)
			hooksDirs = append(hooksDirs, hooksDir)
		}
		c.HooksDir = hooksDirs
		if err := cdi.Configure(cdi.WithSpecDirs(c.CDISpecDirs...)); err != nil {
			return err
		}
		// Validate the pinns path
		if err := c.ValidatePinnsPath("pinns"); err != nil {
			return fmt.Errorf("pinns validation: %w", err)
		}
		c.namespaceManager = nsmgr.New(c.NamespacesDir, c.PinnsPath)
		if err := c.namespaceManager.Initialize(); err != nil {
			return fmt.Errorf("initialize nsmgr: %w", err)
		}
		// Checkpoint/restore is best-effort: disable it rather than fail
		// when the criu binary is unavailable.
		if c.EnableCriuSupport {
			if err := validateCriuInPath(); err != nil {
				c.EnableCriuSupport = false
				// Fix typo in log message: "int $PATH" -> "in $PATH".
				logrus.Infof("Checkpoint/restore support disabled: CRIU binary not found in $PATH")
			} else {
				logrus.Infof("Checkpoint/restore support enabled")
			}
		} else {
			logrus.Infof("Checkpoint/restore support disabled via configuration")
		}
		if c.SeccompProfile == "" {
			if err := c.seccompConfig.LoadDefaultProfile(); err != nil {
				return fmt.Errorf("unable to load default seccomp profile: %w", err)
			}
		} else if err := c.seccompConfig.LoadProfile(c.SeccompProfile); err != nil {
			if !errors.Is(err, os.ErrNotExist) {
				return fmt.Errorf("unable to load seccomp profile: %w", err)
			}
			// Fallback to the internal default in order not to break upgrade paths.
			logrus.Info("Seccomp profile does not exist on disk, fallback to internal default profile")
			if err := c.seccompConfig.LoadDefaultProfile(); err != nil {
				return fmt.Errorf("unable to load default seccomp profile: %w", err)
			}
		}
		if err := c.apparmorConfig.LoadProfile(c.ApparmorProfile); err != nil {
			return fmt.Errorf("unable to load AppArmor profile: %w", err)
		}
		if err := c.blockioConfig.Load(c.BlockIOConfigFile); err != nil {
			return fmt.Errorf("blockio configuration: %w", err)
		}
		c.blockioConfig.SetReload(c.BlockIOReload)
		if err := c.rdtConfig.Load(c.RdtConfigFile); err != nil {
			return fmt.Errorf("rdt configuration: %w", err)
		}
	}
	if err := c.TranslateMonitorFields(onExecution); err != nil {
		return fmt.Errorf("monitor fields translation: %w", err)
	}
	return nil
}
// ValidateDefaultRuntime ensures that the default runtime is set and valid.
func (c *RuntimeConfig) ValidateDefaultRuntime() error {
	// A default runtime defined in the runtime entry table is valid as-is.
	if _, ok := c.Runtimes[c.DefaultRuntime]; ok {
		return nil
	}

	// A non-empty default runtime missing from the entry table is an error.
	if c.DefaultRuntime != "" {
		return fmt.Errorf("default_runtime set to %q, but no runtime entry table [crio.runtime.runtimes.%s] was found", c.DefaultRuntime, c.DefaultRuntime)
	}

	logrus.Debugf("Defaulting to %q as the runtime since default_runtime is not set", DefaultRuntime)

	// The default config already registers DefaultRuntime with its handler
	// in the runtimes map; add an entry only when it is missing.
	if _, ok := c.Runtimes[DefaultRuntime]; !ok {
		c.Runtimes[DefaultRuntime] = defaultRuntimeHandler(c.cgroupManager.IsSystemd())
	}

	// Persist the fallback so later validation steps see a concrete value.
	c.DefaultRuntime = DefaultRuntime

	return nil
}
// getDefaultMonitorGroup checks which default monitor cgroup to use;
// for cgroupfs it is empty.
func getDefaultMonitorGroup(isSystemd bool) string {
	if isSystemd {
		return defaultMonitorCgroup
	}

	return ""
}
// defaultRuntimeHandler builds the built-in runtime handler used when no
// explicit runtime entry is configured. The systemd flag selects the
// monitor cgroup (empty for cgroupfs, see getDefaultMonitorGroup).
func defaultRuntimeHandler(isSystemd bool) *RuntimeHandler {
	return &RuntimeHandler{
		RuntimeType: DefaultRuntimeType,
		RuntimeRoot: DefaultRuntimeRoot,
		AllowedAnnotations: []string{
			annotations.OCISeccompBPFHookAnnotation,
			annotations.DevicesAnnotation,
		},
		MonitorEnv: []string{
			"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
		},
		ContainerMinMemory: units.BytesSize(defaultContainerMinMemoryCrun),
		MonitorCgroup:      getDefaultMonitorGroup(isSystemd),
		ExecCPUAffinity:    ExecCPUAffinityTypeDefault,
		SeccompProfile:     runtimeSeccompProfileDefault,
	}
}
// ValidateRuntimes checks every runtime if its members are valid.
// Runtimes that request inheritance first receive the default runtime's
// settings; handlers that fail validation (other than the default one,
// whose failure is fatal) are removed from the map.
func (c *RuntimeConfig) ValidateRuntimes() error {
	var failedValidation []string
	// Update the default runtime paths in all runtimes that are asking for inheritance
	for name := range c.Runtimes {
		if !c.Runtimes[name].InheritDefaultRuntime {
			continue
		}
		logrus.Infof("Inheriting runtime configuration %q from %q", name, c.DefaultRuntime)
		c.Runtimes[name].RuntimePath = c.Runtimes[c.DefaultRuntime].RuntimePath
		// An empty RuntimePath causes cri-o to look for a binary named `name`,
		// but we inherit from the default - look for binary called c.DefaultRuntime
		// The validator will check the binary is valid below.
		if c.Runtimes[name].RuntimePath == "" {
			executable, err := exec.LookPath(c.DefaultRuntime)
			if err == nil {
				c.Runtimes[name].RuntimePath = executable
			}
		}
		c.Runtimes[name].RuntimeType = c.Runtimes[c.DefaultRuntime].RuntimeType
		c.Runtimes[name].RuntimeConfigPath = c.Runtimes[c.DefaultRuntime].RuntimeConfigPath
		c.Runtimes[name].RuntimeRoot = c.Runtimes[c.DefaultRuntime].RuntimeRoot
	}
	// Validate if runtime_path does exist for each runtime
	for name, handler := range c.Runtimes {
		if err := handler.Validate(name); err != nil {
			// An invalid default runtime is fatal; others are just dropped.
			if c.DefaultRuntime == name {
				return err
			}
			logrus.Warnf("Runtime handler %q is being ignored due to: %v", name, err)
			failedValidation = append(failedValidation, name)
		}
	}
	// Deletion happens after iteration to avoid mutating the map mid-range.
	for _, invalidHandlerName := range failedValidation {
		delete(c.Runtimes, invalidHandlerName)
	}
	c.initializeRuntimeFeatures()
	return nil
}
// initializeRuntimeFeatures probes every configured runtime binary for its
// version and for the optional "features" sub-command output, caching the
// discovered capabilities (ID-mapped mounts, RRO mounts) on each handler.
// Probe failures are logged and the handler is skipped, never fatal.
func (c *RuntimeConfig) initializeRuntimeFeatures() {
	for name, handler := range c.Runtimes {
		versionOutput, err := cmdrunner.CombinedOutput(handler.RuntimePath, "--version")
		if err != nil {
			logrus.Errorf("Unable to determine version of runtime handler %q: %v", name, err)
			continue
		}
		// Collapse the multi-line version output into a single log line.
		versionString := strings.ReplaceAll(strings.TrimSpace(string(versionOutput)), "\n", ", ")
		logrus.Infof("Using runtime handler %s", versionString)
		// If this returns an error, we just ignore it and assume the features sub-command is
		// not supported by the runtime.
		output, err := cmdrunner.CombinedOutput(handler.RuntimePath, "features")
		if err != nil {
			logrus.Errorf("Getting %s OCI runtime features failed: %s: %v", handler.RuntimePath, output, err)
			continue
		}
		// Ignore error if we can't load runtime features.
		if err := handler.LoadRuntimeFeatures(output); err != nil {
			logrus.Errorf("Unable to load OCI features for runtime handler %q: %v", name, err)
			continue
		}
		if handler.RuntimeSupportsIDMap() {
			logrus.Debugf("Runtime handler %q supports User and Group ID-mappings", name)
		}
		// Recursive Read-only (RRO) mounts require runtime handler support,
		// such as runc v1.1 or crun v1.4. For Linux, the minimum kernel
		// version 5.12 or a kernel with the necessary changes backported
		// is required.
		rro := handler.RuntimeSupportsMountFlag("rro")
		if rro {
			logrus.Debugf("Runtime handler %q supports Recursive Read-only (RRO) mounts", name)
			// A given runtime might support Recursive Read-only (RRO) mounts,
			// but the current kernel might not.
			if err := checkKernelRROMountSupport(); err != nil {
				logrus.Warnf("Runtime handler %q supports Recursive Read-only (RRO) mounts, but kernel does not: %v", name, err)
				rro = false
			}
		}
		handler.features.RecursiveReadOnlyMounts = rro
	}
}
// TranslateMonitorFields applies the deprecated monitor related RuntimeConfig
// fields to every runtime handler that uses the default ("oci") runtime type.
func (c *RuntimeConfig) TranslateMonitorFields(onExecution bool) error {
	for name, handler := range c.Runtimes {
		// Only the default ("oci") runtime type uses the monitor fields.
		if handler.RuntimeType != DefaultRuntimeType && handler.RuntimeType != "" {
			continue
		}
		if err := c.TranslateMonitorFieldsForHandler(handler, onExecution); err != nil {
			return fmt.Errorf("failed to translate monitor fields for runtime %s: %w", name, err)
		}
	}
	return nil
}
// TranslateMonitorFieldsForHandler is a transitional function that takes the configuration fields
// previously held by the RuntimeConfig that are being moved inside of the runtime handler structure.
func (c *RuntimeConfig) TranslateMonitorFieldsForHandler(handler *RuntimeHandler, onExecution bool) error {
	// Deprecated top-level conmon_* fields override the handler's values.
	if c.ConmonCgroup != "" {
		logrus.Debugf("Monitor cgroup %s is becoming %s", handler.MonitorCgroup, c.ConmonCgroup)
		handler.MonitorCgroup = c.ConmonCgroup
	}
	if c.Conmon != "" {
		logrus.Debugf("Monitor path %s is becoming %s", handler.MonitorPath, c.Conmon)
		handler.MonitorPath = c.Conmon
	}
	if len(c.ConmonEnv) != 0 {
		handler.MonitorEnv = c.ConmonEnv
	}
	// If systemd and empty, assume default
	if c.cgroupManager.IsSystemd() && handler.MonitorCgroup == "" {
		handler.MonitorCgroup = defaultMonitorCgroup
	}
	if onExecution {
		if err := c.ValidateConmonPath("conmon", handler); err != nil {
			return err
		}
		// if cgroupManager is cgroupfs
		if !c.cgroupManager.IsSystemd() {
			// handler.MonitorCgroup having value "" is valid
			// but the default value system.slice is not
			if handler.MonitorCgroup == defaultMonitorCgroup {
				handler.MonitorCgroup = ""
			}
			if handler.MonitorCgroup != utils.PodCgroupName && handler.MonitorCgroup != "" {
				return fmt.Errorf("cgroupfs manager conmon cgroup should be 'pod' or empty, but got: '%s'", handler.MonitorCgroup)
			}
			return nil
		}
		// With systemd, the monitor cgroup must be 'pod' or a systemd slice.
		if handler.MonitorCgroup != utils.PodCgroupName && !strings.HasSuffix(handler.MonitorCgroup, ".slice") {
			return errors.New("conmon cgroup should be 'pod' or a systemd slice")
		}
	}
	return nil
}
// ValidateConmonPath checks if `Conmon` is set within the `RuntimeConfig`.
// If this is not the case, it tries to find it within the $PATH variable.
// In any other case, it simply checks if `Conmon` is a valid file.
func (c *RuntimeConfig) ValidateConmonPath(executable string, handler *RuntimeHandler) error {
	// Resolve the monitor binary (explicit path or $PATH lookup).
	resolved, err := validateExecutablePath(executable, handler.MonitorPath)
	if err != nil {
		return err
	}
	handler.MonitorPath = resolved
	// Build the conmon manager so capability queries work later on.
	c.conmonManager, err = conmonmgr.New(handler.MonitorPath)
	return err
}
// ConmonSupportsSync returns whether the configured conmon binary supports
// synchronized logging.
func (c *RuntimeConfig) ConmonSupportsSync() bool {
	return c.conmonManager.SupportsSync()
}
// ConmonSupportsLogGlobalSizeMax returns whether the configured conmon binary
// supports a global maximum log size.
func (c *RuntimeConfig) ConmonSupportsLogGlobalSizeMax() bool {
	return c.conmonManager.SupportsLogGlobalSizeMax()
}
// validateCriuInPath checks that the criu binary can be found in $PATH.
func validateCriuInPath() error {
	_, err := validateExecutablePath("criu", "")
	return err
}
// Seccomp returns the seccomp configuration.
func (c *RuntimeConfig) Seccomp() *seccomp.Config {
	return c.seccompConfig
}
// AppArmor returns the AppArmor configuration.
func (c *RuntimeConfig) AppArmor() *apparmor.Config {
	return c.apparmorConfig
}
// BlockIO returns the blockio configuration.
func (c *RuntimeConfig) BlockIO() *blockio.Config {
	return c.blockioConfig
}
// Rdt returns the RDT configuration.
func (c *RuntimeConfig) Rdt() *rdt.Config {
	return c.rdtConfig
}
// CgroupManager returns the CgroupManager configuration.
func (c *RuntimeConfig) CgroupManager() cgmgr.CgroupManager {
	return c.cgroupManager
}
// NamespaceManager returns the NamespaceManager configuration.
func (c *RuntimeConfig) NamespaceManager() *nsmgr.NamespaceManager {
	return c.namespaceManager
}
// Ulimits returns the Ulimits configuration.
func (c *RuntimeConfig) Ulimits() []ulimits.Ulimit {
	return c.ulimitsConfig.Ulimits()
}
// Devices returns the configured list of additional devices.
func (c *RuntimeConfig) Devices() []device.Device {
	return c.deviceConfig.Devices()
}
// CheckpointRestore returns whether CRIU based checkpoint/restore support is
// enabled.
func (c *RuntimeConfig) CheckpointRestore() bool {
	return c.EnableCriuSupport
}
// validateExecutablePath resolves and validates the path of an executable.
// If currentPath is empty the executable is looked up in $PATH; otherwise
// currentPath must point to an existing file.
func validateExecutablePath(executable, currentPath string) (string, error) {
	if currentPath != "" {
		if _, err := os.Stat(currentPath); err != nil {
			return "", fmt.Errorf("invalid %s path: %w", executable, err)
		}
		logrus.Infof("Using %s executable: %s", executable, currentPath)
		return currentPath, nil
	}
	found, err := exec.LookPath(executable)
	if err != nil {
		return "", err
	}
	logrus.Debugf("Using %s from $PATH: %s", executable, found)
	return found, nil
}
// Validate is the main entry point for image configuration validation.
// It checks that the signature policy and namespaced auth directories are
// absolute (creating them when onExecution is set), that the pause image
// parses, and that the short name mode is known.
// It returns an error on validation failure, otherwise nil.
func (c *ImageConfig) Validate(onExecution bool) error {
	// Iterate over a slice instead of a map so the validation (and hence any
	// error reported) happens in a deterministic order.
	for _, dir := range []struct{ key, value string }{
		{"signature policy", c.SignaturePolicyDir},
		{"namespaced auth", c.NamespacedAuthDir},
	} {
		if !filepath.IsAbs(dir.value) {
			return fmt.Errorf("%s dir %q is not absolute", dir.key, dir.value)
		}
		if onExecution {
			if err := os.MkdirAll(dir.value, 0o755); err != nil {
				return fmt.Errorf("cannot create %s dir: %w", dir.key, err)
			}
		}
	}
	if _, err := c.ParsePauseImage(); err != nil {
		return fmt.Errorf("invalid pause image %q: %w", c.PauseImage, err)
	}
	switch c.ShortNameMode {
	case "enforcing", "disabled", "":
		// Valid (empty means the containers/image default).
	default:
		return fmt.Errorf("invalid short name mode %q", c.ShortNameMode)
	}
	return nil
}
// ParsePauseImage parses the .PauseImage value into a validated, well-typed value.
func (c *ImageConfig) ParsePauseImage() (references.RegistryImageReference, error) {
	return references.ParseRegistryImageReferenceFromOutOfProcessData(c.PauseImage)
}
// Validate is the main entry point for network configuration validation.
// The parameter `onExecution` specifies if the validation should include
// execution checks. It returns an `error` on validation failure, otherwise
// `nil`.
func (c *NetworkConfig) Validate(onExecution bool) error {
	if onExecution {
		// Ensure the network directory exists, creating it when missing.
		err := utils.IsDirectory(c.NetworkDir)
		if err != nil {
			if os.IsNotExist(err) {
				if err = os.MkdirAll(c.NetworkDir, 0o755); err != nil {
					return fmt.Errorf("cannot create network_dir: %s: %w", c.NetworkDir, err)
				}
			} else {
				return fmt.Errorf("invalid network_dir: %s: %w", c.NetworkDir, err)
			}
		}
		for _, pluginDir := range c.PluginDirs {
			if err := os.MkdirAll(pluginDir, 0o755); err != nil {
				return fmt.Errorf("invalid plugin_dirs entry: %w", err)
			}
		}
		// While the plugin_dir option is being deprecated, we need this check
		if c.PluginDir != "" {
			logrus.Warnf("The config field plugin_dir is being deprecated. Please use plugin_dirs instead")
			if err := os.MkdirAll(c.PluginDir, 0o755); err != nil {
				return fmt.Errorf("invalid plugin_dir entry: %w", err)
			}
			// Append PluginDir to PluginDirs, so from now on we can operate in terms of PluginDirs and not worry
			// about missing cases.
			c.PluginDirs = append(c.PluginDirs, c.PluginDir)
			// Empty the pluginDir so on future config calls we don't print it out
			// thus seamlessly transitioning and depreciating the option
			c.PluginDir = ""
		}
		// Init CNI plugin
		cniManager, err := cnimgr.New(
			c.CNIDefaultNetwork, c.NetworkDir, c.PluginDirs...,
		)
		if err != nil {
			return fmt.Errorf("initialize CNI plugin: %w", err)
		}
		c.cniManager = cniManager
	}
	return nil
}
// Validate checks if the whole runtime is valid.
func (r *RuntimeHandler) Validate(name string) error {
	// Run every validation step in order; the first failing step wins.
	for _, check := range []func() error{
		func() error { return r.ValidateRuntimeType(name) },
		func() error { return r.ValidateRuntimePath(name) },
		func() error { return r.ValidateRuntimeConfigPath(name) },
		r.ValidateRuntimeAllowedAnnotations,
		func() error {
			// A failure to set the minimum container memory is only logged.
			if err := r.ValidateContainerMinMemory(name); err != nil {
				logrus.Errorf("Unable to set minimum container memory for runtime handler %q: %v", name, err)
			}
			return nil
		},
		func() error {
			if err := r.ValidateNoSyncLog(); err != nil {
				return fmt.Errorf("no sync log: %w", err)
			}
			return nil
		},
		func() error {
			if err := r.ValidateWebsocketStreaming(name); err != nil {
				return fmt.Errorf("websocket streaming: %w", err)
			}
			return nil
		},
		r.validateRuntimeExecCPUAffinity,
		r.validateRuntimeSeccompProfile,
	} {
		if err := check(); err != nil {
			return err
		}
	}
	return nil
}
// ValidateRuntimeVMBinaryPattern reports whether the runtime binary name
// follows the containerd shim naming pattern; non-VM runtimes always pass.
func (r *RuntimeHandler) ValidateRuntimeVMBinaryPattern() bool {
	if r.RuntimeType != RuntimeTypeVM {
		return true
	}
	matched, err := regexp.MatchString(RuntimeTypeVMBinaryPattern, filepath.Base(r.RuntimePath))
	return err == nil && matched
}
// ValidateRuntimePath checks if the `RuntimePath` is either set or available
// within the $PATH environment. The method fails on any `RuntimePath` lookup
// error.
func (r *RuntimeHandler) ValidateRuntimePath(name string) error {
	if r.RuntimePath == "" {
		// No explicit path configured: resolve the handler name via $PATH.
		executable, err := exec.LookPath(name)
		if err != nil {
			return fmt.Errorf("%q not found in $PATH: %w", name, err)
		}
		r.RuntimePath = executable
		logrus.Debugf("Using runtime executable from $PATH %q", executable)
	} else if _, err := os.Stat(r.RuntimePath); err != nil && os.IsNotExist(err) {
		return fmt.Errorf("invalid runtime_path for runtime '%s': %w", name, err)
	}
	// VM runtimes must additionally follow the containerd binary naming scheme.
	if !r.ValidateRuntimeVMBinaryPattern() {
		return fmt.Errorf("invalid runtime_path for runtime '%s': containerd binary naming pattern is not followed",
			name)
	}
	logrus.Debugf("Found valid runtime %q for runtime_path %q", name, r.RuntimePath)
	return nil
}
// ValidateRuntimeType checks if the `RuntimeType` is valid.
func (r *RuntimeHandler) ValidateRuntimeType(name string) error {
	switch r.RuntimeType {
	case "", DefaultRuntimeType, RuntimeTypeVM, RuntimeTypePod:
		return nil
	}
	return fmt.Errorf("invalid `runtime_type` %q for runtime %q",
		r.RuntimeType, name)
}
// ValidateRuntimeConfigPath checks if the `RuntimeConfigPath` exists.
func (r *RuntimeHandler) ValidateRuntimeConfigPath(name string) error {
	if r.RuntimeConfigPath == "" {
		return nil
	}
	// Only the VM runtime type understands a separate runtime config file.
	if r.RuntimeType != RuntimeTypeVM {
		return errors.New("runtime_config_path can only be used with the 'vm' runtime type")
	}
	_, err := os.Stat(r.RuntimeConfigPath)
	if err != nil && os.IsNotExist(err) {
		return fmt.Errorf("invalid runtime_config_path for runtime '%s': %w", name, err)
	}
	return nil
}
// ValidateRuntimeAllowedAnnotations validates the handler's allowed
// annotations and stores the complementary disallowed set.
func (r *RuntimeHandler) ValidateRuntimeAllowedAnnotations() error {
	disallowed, err := validateAllowedAndGenerateDisallowedAnnotations(r.AllowedAnnotations)
	if err != nil {
		return err
	}
	logrus.Debugf("Allowed annotations for runtime: %v", r.AllowedAnnotations)
	r.DisallowedAnnotations = disallowed
	return nil
}
// ValidateNoSyncLog checks if the `NoSyncLog` is used with the correct `RuntimeType` ('oci').
func (r *RuntimeHandler) ValidateNoSyncLog() error {
	if !r.NoSyncLog {
		return nil
	}
	// no_sync_log can only be used with the 'oci' runtime type.
	// This means that the runtime type must be set to 'oci' or left empty
	if r.RuntimeType != DefaultRuntimeType && r.RuntimeType != "" {
		return fmt.Errorf("no_sync_log is only allowed with runtime type 'oci', runtime type is '%s'", r.RuntimeType)
	}
	logrus.Warn("NoSyncLog is enabled. This can lead to lost log data")
	return nil
}
// ValidateContainerMinMemory sets the minimum container memory for a given runtime.
// assigns defaultContainerMinMemory if no container_min_memory provided.
func (r *RuntimeHandler) ValidateContainerMinMemory(name string) error {
	if r.ContainerMinMemory == "" {
		r.ContainerMinMemory = units.BytesSize(defaultContainerMinMemory)
	}
	memorySize, parseErr := units.RAMInBytes(r.ContainerMinMemory)
	if parseErr != nil {
		// Fallback to the default value if the configured value is not parsable,
		// reporting the original (bad) value in the returned error.
		wrapped := fmt.Errorf("unable to set runtime memory to %q: %w. Setting to %q instead", r.ContainerMinMemory, parseErr, defaultContainerMinMemory)
		r.ContainerMinMemory = units.BytesSize(defaultContainerMinMemory)
		return wrapped
	}
	logrus.Debugf("Runtime handler %q container minimum memory set to %d bytes", name, memorySize)
	return nil
}
// ValidateWebsocketStreaming can be used to verify if the runtime supports WebSocket streaming.
// For non-pod runtime types the option is rejected outright; for pod runtimes
// the conmon-rs version is probed and the option silently disabled when the
// version cannot be determined.
func (r *RuntimeHandler) ValidateWebsocketStreaming(name string) error {
	if r.RuntimeType != RuntimeTypePod {
		if r.StreamWebsockets {
			return fmt.Errorf(`only the 'runtime_type = "pod"' supports websocket streaming, not %q (runtime %q)`, r.RuntimeType, name)
		}
		return nil
	}
	// Requires at least conmon-rs v0.7.0
	v, err := conmonrsClient.Version(r.MonitorPath)
	if err != nil {
		if errors.Is(err, conmonrsClient.ErrUnsupported) {
			logrus.Debugf("Unable to verify pod runtime version: %v", err)
			// Streaming server support got introduced in v0.7.0
			if r.StreamWebsockets {
				logrus.Warnf("Disabling streaming over websockets, it requires conmon-rs >= v0.7.0")
				r.StreamWebsockets = false
			}
			return nil
		}
		return fmt.Errorf("get conmon-rs version: %w", err)
	}
	// Normalize an empty tag for the log line below.
	if v.Tag == "" {
		v.Tag = "none"
	}
	logrus.Infof(
		"Runtime handler %q is using conmon-rs version: %s, tag: %s, commit: %s, build: %s, target: %s, %s, %s",
		name, v.Version, v.Tag, v.Commit, v.BuildDate, v.Target, v.RustVersion, v.CargoVersion,
	)
	return nil
}
// LoadRuntimeFeatures loads features for a given runtime handler using the "features"
// sub-command output, where said output contains a JSON document called "Features
// Structure" that describes the runtime handler's supported features.
func (r *RuntimeHandler) LoadRuntimeFeatures(input []byte) error {
	if err := json.Unmarshal(input, &r.features); err != nil {
		return fmt.Errorf("unable to unmarshal features structure: %w", err)
	}
	// OCIVersionMin and OCIVersionMax are the only required properties of the
	// Features Structure; all other properties may be absent, empty, or null.
	// Their absence therefore indicates a potentially invalid document.
	//
	// See the following for more details about the Features Structure:
	// https://github.com/opencontainers/runtime-spec/blob/main/features.md
	if r.features.OCIVersionMin == "" || r.features.OCIVersionMax == "" {
		return errors.New("runtime features structure is not valid")
	}
	return nil
}
// RuntimeSupportsIDMap returns whether this runtime supports the "runtime features"
// command, and that the output of that command advertises IDMap mounts as an option.
func (r *RuntimeHandler) RuntimeSupportsIDMap() bool {
	linux := r.features.Linux
	if linux == nil || linux.MountExtensions == nil || linux.MountExtensions.IDMap == nil {
		return false
	}
	enabled := linux.MountExtensions.IDMap.Enabled
	return enabled != nil && *enabled
}
// RuntimeSupportsRROMounts returns whether this runtime supports the Recursive Read-only mount as an option.
func (r *RuntimeHandler) RuntimeSupportsRROMounts() bool {
	return r.features.RecursiveReadOnlyMounts
}
// RuntimeSupportsMountFlag returns whether this runtime supports the specified mount option.
func (r *RuntimeHandler) RuntimeSupportsMountFlag(flag string) bool {
	return slices.Contains(r.features.MountOptions, flag)
}
// RuntimeDefaultAnnotations returns the default annotations for this handler.
func (r *RuntimeHandler) RuntimeDefaultAnnotations() map[string]string {
	return r.DefaultAnnotations
}
// RuntimeStreamWebsockets returns the configured websocket streaming option for this handler.
func (r *RuntimeHandler) RuntimeStreamWebsockets() bool {
	return r.StreamWebsockets
}
// RuntimeSeccomp returns the configuration of the loaded seccomp profile for this handler.
// It is nil when no handler-specific seccomp profile is configured.
func (r *RuntimeHandler) RuntimeSeccomp() *seccomp.Config {
	return r.seccompConfig
}
// validateRuntimeExecCPUAffinity checks if the RuntimeHandler enforces proper CPU affinity settings.
func (r *RuntimeHandler) validateRuntimeExecCPUAffinity() error {
	if r.ExecCPUAffinity == ExecCPUAffinityTypeDefault || r.ExecCPUAffinity == ExecCPUAffinityTypeFirst {
		return nil
	}
	return fmt.Errorf("invalid exec_cpu_affinity %q", r.ExecCPUAffinity)
}
// validateRuntimeSeccompProfile tries to load the RuntimeHandler seccomp profile.
func (r *RuntimeHandler) validateRuntimeSeccompProfile() error {
	if r.SeccompProfile == "" {
		// No handler-specific profile configured.
		r.seccompConfig = nil
		return nil
	}
	cfg := seccomp.New()
	r.seccompConfig = cfg
	if err := cfg.LoadProfile(r.SeccompProfile); err != nil {
		return fmt.Errorf("unable to load runtime handler seccomp profile: %w", err)
	}
	return nil
}
// validateAllowedAndGenerateDisallowedAnnotations validates the given allowed
// annotations against the set of all known annotations and returns the
// complementary (disallowed) annotations.
func validateAllowedAndGenerateDisallowedAnnotations(allowed []string) (disallowed []string, _ error) {
	// Track every known annotation; the value records whether it was allowed.
	// (The loop variable below is named `isAllowed` to avoid shadowing the
	// `allowed` parameter.)
	knownAnnotations := make(map[string]bool)
	for _, ann := range annotations.AllAllowedAnnotations {
		knownAnnotations[ann] = false
	}
	for _, ann := range allowed {
		if _, ok := knownAnnotations[ann]; !ok {
			return nil, fmt.Errorf("invalid allowed_annotation: %s", ann)
		}
		knownAnnotations[ann] = true
	}
	disallowed = make([]string, 0, len(knownAnnotations))
	for ann, isAllowed := range knownAnnotations {
		if !isAllowed {
			disallowed = append(disallowed, ann)
		}
	}
	return disallowed, nil
}
// CNIPlugin returns the network configuration CNI plugin.
func (c *NetworkConfig) CNIPlugin() ocicni.CNIPlugin {
	return c.cniManager.Plugin()
}
// CNIPluginReadyOrError returns whether the cni plugin is ready.
func (c *NetworkConfig) CNIPluginReadyOrError() error {
	return c.cniManager.ReadyOrError()
}
// CNIPluginAddWatcher returns a channel notifying about CNI plugin readiness.
func (c *NetworkConfig) CNIPluginAddWatcher() chan bool {
	return c.cniManager.AddWatcher()
}
// CNIPluginGC calls the plugin's GC to clean up any resources concerned with
// stale pods (pod other than the ones provided by validPodList). The call to
// the plugin will be deferred until it is ready logging any errors then and
// returning nil error here.
// NOTE(review): unlike its siblings this method has a *Config receiver —
// confirm this is intentional.
func (c *Config) CNIPluginGC(ctx context.Context, validPodList cnimgr.PodNetworkLister) error {
	return c.cniManager.GC(ctx, validPodList)
}
// CNIManagerShutdown shuts down the CNI Manager.
func (c *NetworkConfig) CNIManagerShutdown() {
	c.cniManager.Shutdown()
}
// SetSingleConfigPath set single config path for config.
func (c *Config) SetSingleConfigPath(singleConfigPath string) {
	c.singleConfigPath = singleConfigPath
}
package config
import (
"context"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"github.com/sirupsen/logrus"
"os"
)
// FuzzLoadConfig fuzzes the configuration round trip: it writes fuzz data to
// a temporary config file, applies it on top of the default config, validates
// the result and writes it back out as a template.
// It returns 1 when the whole round trip succeeded and 0 otherwise.
func FuzzLoadConfig(data []byte) int {
	logrus.SetLevel(logrus.ErrorLevel)
	c, err := DefaultConfig()
	if err != nil {
		return 0
	}
	f := fuzz.NewConsumer(data)
	confBytes, err := f.GetBytes()
	if err != nil {
		return 0
	}
	randomFile, err := os.Create("cri-o.config")
	if err != nil {
		return 0
	}
	// Close and remove the file on every exit path. The original code leaked
	// the handle on the WriteTemplate error path; defers cover all returns
	// (Close runs before Remove due to LIFO ordering).
	defer os.Remove("cri-o.config")
	defer randomFile.Close()
	if _, err := randomFile.Write(confBytes); err != nil {
		return 0
	}
	if err := c.UpdateFromFile(context.Background(), "cri-o.config"); err != nil {
		return 0
	}
	if err := c.Validate(false); err != nil {
		return 0
	}
	devNullFile, err := os.Open(os.DevNull)
	if err != nil {
		return 0
	}
	defer devNullFile.Close()
	if err := c.WriteTemplate(true, devNullFile); err != nil {
		return 0
	}
	return 1
}
package config
import (
"errors"
"fmt"
"os"
"sync"
"github.com/containers/storage/pkg/parsers/kernel"
selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
// Defaults if none are specified.
const (
	// DefaultRuntime is the default OCI runtime name.
	DefaultRuntime = "crun"
	// DefaultRuntimeType is the default OCI runtime type.
	DefaultRuntimeType = "oci"
	// DefaultRuntimeRoot is the default root directory of the OCI runtime.
	DefaultRuntimeRoot = "/run/crun"
	// defaultMonitorCgroup is the monitor cgroup used when the systemd
	// cgroup manager is configured.
	defaultMonitorCgroup = "system.slice"
	// ImageVolumesBind option is for using bind mounted volumes.
	ImageVolumesBind ImageVolumesType = "bind"
	// DefaultPauseImage is default pause image.
	DefaultPauseImage string = "registry.k8s.io/pause:3.10.1"
)
var (
	// kernelRROSupportOnce guards the one-time kernel RRO support probe.
	kernelRROSupportOnce sync.Once
	// kernelRROSupportError caches the probe result for subsequent calls.
	kernelRROSupportError error
)
// selinuxEnabled returns whether SELinux is enabled on the host.
func selinuxEnabled() bool {
	return selinux.GetEnabled()
}
// ValidatePinnsPath resolves and validates the pinns executable path, falling
// back to a $PATH lookup when pinns_path is not configured.
func (c *RuntimeConfig) ValidatePinnsPath(executable string) error {
	var err error
	c.PinnsPath, err = validateExecutablePath(executable, c.PinnsPath)
	return err
}
// checkKernelRROMountSupport checks the kernel support for the Recursive Read-only (RRO) mounts.
// The probe runs only once; subsequent calls return the cached result.
func checkKernelRROMountSupport() error {
	kernelRROSupportOnce.Do(func() {
		// Check the current kernel version for RRO mounts support...
		versionErr := validateKernelRROVersion()
		if versionErr == nil {
			kernelRROSupportError = nil
			return
		}
		// ... and if the kernel version does not match the minimum required
		// version, then verify whether the kernel supports RRO mounts
		// regardless, as often Linux distributions provide heavily patched
		// kernel releases, and the current kernel might include backported
		// support.
		if mountErr := validateKernelRROMount(); mountErr != nil {
			kernelRROSupportError = fmt.Errorf("%w: %w", versionErr, mountErr)
			return
		}
		kernelRROSupportError = nil
	})
	return kernelRROSupportError
}
// validateKernelRROVersion checks whether the current kernel version matches the release 5.12 or newer,
// which is the minimum required kernel version that supports Recursive Read-only (RRO) mounts.
func validateKernelRROVersion() error {
	kv, err := kernel.GetKernelVersion()
	if err != nil {
		return fmt.Errorf("unable to retrieve kernel version: %w", err)
	}
	// RRO mounts require at least kernel 5.12.
	minimum := kernel.VersionInfo{Kernel: 5, Major: 12, Minor: 0}
	if kernel.CompareKernelVersion(*kv, minimum) < 0 {
		return fmt.Errorf("kernel version %q does not support recursive read-only mounts", kv)
	}
	return nil
}
// validateKernelRROMount checks whether the current kernel can support Recursive Read-only mounts.
// It uses a test mount of tmpfs against which an attempt will be made to set the required attributes.
// If there is no failure in doing so, then the kernel has the required support.
func validateKernelRROMount() error {
	path, err := os.MkdirTemp("", "crio-rro-*")
	if err != nil {
		return fmt.Errorf("unable to create directory: %w", err)
	}
	defer func() {
		if err := os.RemoveAll(path); err != nil {
			logrus.Errorf("Unable to remove directory: %v", err)
		}
	}()
	// Retry the mount while it is interrupted by a signal (EINTR).
	for {
		err = unix.Mount("", path, "tmpfs", 0, "")
		if !errors.Is(err, unix.EINTR) {
			break
		}
	}
	if err != nil {
		return fmt.Errorf("unable to mount directory %q using tmpfs: %w", path, err)
	}
	defer func() {
		// Unmount with the same EINTR retry loop; failures are only logged.
		var unmountErr error
		for {
			unmountErr = unix.Unmount(path, 0)
			if !errors.Is(unmountErr, unix.EINTR) {
				break
			}
		}
		if unmountErr != nil {
			logrus.Errorf("Unable to unmount directory %q: %v", path, unmountErr)
		}
	}()
	// Attempt to set the read-only attribute recursively on the test mount,
	// again retrying on EINTR.
	for {
		err = unix.MountSetattr(-1, path, unix.AT_RECURSIVE,
			&unix.MountAttr{
				Attr_set: unix.MOUNT_ATTR_RDONLY,
			},
		)
		if !errors.Is(err, unix.EINTR) {
			break
		}
	}
	if err != nil {
		// ENOSYS means the kernel lacks mount_setattr(2) entirely; any other
		// error is reported as a generic attribute failure.
		if !errors.Is(err, unix.ENOSYS) {
			return fmt.Errorf("unable to set mount attribute for directory %q: %w", path, err)
		}
		return fmt.Errorf("unable to set recursive read-only mount attribute: %w", err)
	}
	return nil
}
// All *_inject.go files are meant to be used by tests only. The purpose of
// these files is to provide a way to inject mocked data into the current setup.
package config
import (
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/cri-o/cri-o/internal/config/cnimgr"
"github.com/cri-o/cri-o/internal/config/nsmgr"
)
// SetCNIPlugin sets the network plugin for the Configuration. The function
// errors if a sane shutdown of the initially created network plugin failed.
func (c *Config) SetCNIPlugin(plugin ocicni.CNIPlugin) error {
	// Lazily create the manager so a plugin can be injected on a fresh config.
	if c.cniManager == nil {
		c.cniManager = &cnimgr.CNIManager{}
	}
	return c.cniManager.SetCNIPlugin(plugin)
}
// SetNamespaceManager sets the namespaceManager for the Configuration.
func (c *Config) SetNamespaceManager(nsMgr *nsmgr.NamespaceManager) {
	c.namespaceManager = nsMgr
}
// SetCheckpointRestore offers the possibility to turn on and
// turn off CheckpointRestore support for testing.
func (c *RuntimeConfig) SetCheckpointRestore(cr bool) {
	c.EnableCriuSupport = cr
}
package config
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/sirupsen/logrus"
"tags.cncf.io/container-device-interface/pkg/cdi"
"github.com/cri-o/cri-o/internal/log"
)
// Reload reloads the configuration for the single crio.conf and the drop-in
// configuration directory.
// It builds a fresh default config, layers the single file and the drop-in
// directory on top (each skipped when missing), and then applies every
// reloadable option to the receiver.
func (c *Config) Reload(ctx context.Context) error {
	log.Infof(ctx, "Reloading configuration")
	// Reload the config
	newConfig, err := DefaultConfig()
	if err != nil {
		return fmt.Errorf("unable to create default config: %w", err)
	}
	if _, err := os.Stat(c.singleConfigPath); !os.IsNotExist(err) {
		if err := newConfig.UpdateFromFile(ctx, c.singleConfigPath); err != nil {
			return fmt.Errorf("update config from single file: %w", err)
		}
	} else {
		log.Infof(ctx, "Skipping not-existing config file %q", c.singleConfigPath)
	}
	if _, err := os.Stat(c.dropInConfigDir); !os.IsNotExist(err) {
		if err := newConfig.UpdateFromPath(ctx, c.dropInConfigDir); err != nil {
			return fmt.Errorf("update config from path: %w", err)
		}
	} else {
		log.Infof(ctx, "Skipping not-existing config path %q", c.dropInConfigDir)
	}
	// Reload all available options
	if err := c.ReloadLogLevel(newConfig); err != nil {
		return err
	}
	if err := c.ReloadLogFilter(newConfig); err != nil {
		return err
	}
	if err := c.ReloadPauseImage(newConfig); err != nil {
		return err
	}
	c.ReloadPinnedImages(newConfig)
	if err := c.ReloadRegistries(); err != nil {
		return err
	}
	c.ReloadDecryptionKeyConfig(newConfig)
	if err := c.ReloadSeccompProfile(newConfig); err != nil {
		return err
	}
	if err := c.ReloadAppArmorProfile(newConfig); err != nil {
		return err
	}
	if err := c.ReloadBlockIOConfig(newConfig); err != nil {
		return err
	}
	if err := c.ReloadRdtConfig(newConfig); err != nil {
		return err
	}
	if err := c.ReloadRuntimes(newConfig); err != nil {
		return err
	}
	// Re-configure the CDI spec directories last.
	if err := cdi.Configure(cdi.WithSpecDirs(newConfig.CDISpecDirs...)); err != nil {
		return err
	}
	return nil
}
// logConfig logs a config set operation as with info verbosity. Please always
// use this function for setting configuration options to ensure consistent
// log outputs.
func logConfig(option, value string) {
	logrus.Infof("Set config %s to %q", option, value)
}
// ReloadLogLevel updates the LogLevel with the provided `newConfig`. It errors
// if the level is not parsable.
func (c *Config) ReloadLogLevel(newConfig *Config) error {
	if c.LogLevel == newConfig.LogLevel {
		return nil
	}
	level, err := logrus.ParseLevel(newConfig.LogLevel)
	if err != nil {
		return err
	}
	// Always log this message without considering the current level.
	logrus.SetLevel(logrus.InfoLevel)
	logConfig("log_level", newConfig.LogLevel)
	logrus.SetLevel(level)
	c.LogLevel = newConfig.LogLevel
	return nil
}
// ReloadLogFilter updates the LogFilter with the provided `newConfig`. It errors
// if the filter is not applicable.
func (c *Config) ReloadLogFilter(newConfig *Config) error {
	if c.LogFilter == newConfig.LogFilter {
		return nil
	}
	hook, err := log.NewFilterHook(newConfig.LogFilter)
	if err != nil {
		return err
	}
	// Remove the previous filter hook before installing the new one.
	logger := logrus.StandardLogger()
	log.RemoveHook(logger, "FilterHook")
	logConfig("log_filter", newConfig.LogFilter)
	logger.AddHook(hook)
	c.LogFilter = newConfig.LogFilter
	return nil
}
// ReloadPauseImage updates the pause image, its auth file and the pause
// command from the provided newConfig, validating changed values first.
func (c *Config) ReloadPauseImage(newConfig *Config) error {
	if newConfig.PauseImage != c.PauseImage {
		// The new image reference must parse before it is applied.
		if _, err := newConfig.ParsePauseImage(); err != nil {
			return err
		}
		c.PauseImage = newConfig.PauseImage
		logConfig("pause_image", c.PauseImage)
	}
	if newConfig.PauseImageAuthFile != c.PauseImageAuthFile {
		// A non-empty auth file must exist on disk.
		if newConfig.PauseImageAuthFile != "" {
			if _, err := os.Stat(newConfig.PauseImageAuthFile); err != nil {
				return err
			}
		}
		c.PauseImageAuthFile = newConfig.PauseImageAuthFile
		logConfig("pause_image_auth_file", c.PauseImageAuthFile)
	}
	if newConfig.PauseCommand != c.PauseCommand {
		c.PauseCommand = newConfig.PauseCommand
		logConfig("pause_command", c.PauseCommand)
	}
	return nil
}
// ReloadPinnedImages replace the PinnedImages
// with the provided `newConfig.PinnedImages`.
// The method skips empty items and prints a log message.
func (c *Config) ReloadPinnedImages(newConfig *Config) {
	if len(newConfig.PinnedImages) == 0 {
		c.PinnedImages = []string{}
		logConfig("pinned_images", "[]")
		return
	}
	// Nothing to do when the lists are equal ignoring order.
	sortStrings := cmpopts.SortSlices(func(a, b string) bool { return a < b })
	if cmp.Equal(c.PinnedImages, newConfig.PinnedImages, sortStrings) {
		return
	}
	// Drop empty entries before applying the new list.
	filtered := make([]string, 0, len(newConfig.PinnedImages))
	for _, img := range newConfig.PinnedImages {
		if img != "" {
			filtered = append(filtered, img)
		}
	}
	logConfig("pinned_images", strings.Join(filtered, ","))
	c.PinnedImages = filtered
}
// ReloadRegistries reloads the registry configuration from the Configs
// `SystemContext`. The method errors in case of any update failure.
func (c *Config) ReloadRegistries() error {
	registries, err := sysregistriesv2.TryUpdatingCache(c.SystemContext)
	if err != nil {
		configPath := sysregistriesv2.ConfigPath(c.SystemContext)
		return fmt.Errorf("system registries reload failed: %s: %w", configPath, err)
	}
	logrus.Infof("Applied new registry configuration: %+v", registries)
	return nil
}
// ReloadDecryptionKeyConfig updates the DecryptionKeysPath with the provided
// `newConfig`.
func (c *Config) ReloadDecryptionKeyConfig(newConfig *Config) {
	if c.DecryptionKeysPath == newConfig.DecryptionKeysPath {
		return
	}
	logConfig("decryption_keys_path", newConfig.DecryptionKeysPath)
	c.DecryptionKeysPath = newConfig.DecryptionKeysPath
}
// ReloadSeccompProfile reloads the seccomp profile from the new config if
// their paths differ.
func (c *Config) ReloadSeccompProfile(newConfig *Config) error {
	// Reload the seccomp profile in any case because its content could have
	// changed as well.
	switch {
	case newConfig.SeccompProfile == "":
		// No profile configured: use the internal default.
		if err := c.seccompConfig.LoadDefaultProfile(); err != nil {
			return fmt.Errorf("unable to load default seccomp profile: %w", err)
		}
	default:
		err := c.seccompConfig.LoadProfile(newConfig.SeccompProfile)
		if err != nil {
			if !errors.Is(err, os.ErrNotExist) {
				return fmt.Errorf("unable to load seccomp profile: %w", err)
			}
			// A missing profile file falls back to the internal default.
			logrus.Info("Seccomp profile does not exist on disk, fallback to internal default profile")
			if err := c.seccompConfig.LoadDefaultProfile(); err != nil {
				return fmt.Errorf("unable to load default seccomp profile: %w", err)
			}
		}
	}
	c.SeccompProfile = newConfig.SeccompProfile
	logConfig("seccomp_profile", c.SeccompProfile)
	c.PrivilegedSeccompProfile = newConfig.PrivilegedSeccompProfile
	logConfig("privileged_seccomp_profile", c.PrivilegedSeccompProfile)
	return nil
}
// ReloadAppArmorProfile reloads the AppArmor profile from the new config if
// they differ.
func (c *Config) ReloadAppArmorProfile(newConfig *Config) error {
	// Unchanged profile name: nothing to reload.
	if newConfig.ApparmorProfile == c.ApparmorProfile {
		return nil
	}
	if err := c.AppArmor().LoadProfile(newConfig.ApparmorProfile); err != nil {
		return fmt.Errorf("unable to reload apparmor_profile: %w", err)
	}
	c.ApparmorProfile = newConfig.ApparmorProfile
	logConfig("apparmor_profile", c.ApparmorProfile)
	return nil
}
// ReloadBlockIOConfig reloads the blockio configuration from the new config.
func (c *Config) ReloadBlockIOConfig(newConfig *Config) error {
	// Only reload the blockio file when its path changed.
	if newConfig.BlockIOConfigFile != c.BlockIOConfigFile {
		if err := c.BlockIO().Load(newConfig.BlockIOConfigFile); err != nil {
			return fmt.Errorf("unable to reload blockio_config_file: %w", err)
		}
		c.BlockIOConfigFile = newConfig.BlockIOConfigFile
		logConfig("blockio_config_file", c.BlockIOConfigFile)
	}

	// The reload toggle is a plain value and needs no file access.
	if newConfig.BlockIOReload != c.BlockIOReload {
		c.BlockIOReload = newConfig.BlockIOReload
		logConfig("blockio_reload", strconv.FormatBool(c.BlockIOReload))
	}
	return nil
}
// ReloadRdtConfig reloads the RDT configuration if changed.
func (c *Config) ReloadRdtConfig(newConfig *Config) error {
	// Unchanged path: keep the currently loaded RDT configuration.
	if newConfig.RdtConfigFile == c.RdtConfigFile {
		return nil
	}
	if err := c.Rdt().Load(newConfig.RdtConfigFile); err != nil {
		return fmt.Errorf("unable to reload rdt_config_file: %w", err)
	}
	c.RdtConfigFile = newConfig.RdtConfigFile
	logConfig("rdt_config_file", c.RdtConfigFile)
	return nil
}
// ReloadRuntimes reloads the runtimes configuration if changed.
func (c *Config) ReloadRuntimes(newConfig *Config) error {
	changed := false

	if !RuntimesEqual(c.Runtimes, newConfig.Runtimes) {
		logrus.Infof("Updating runtime configuration")
		c.Runtimes = newConfig.Runtimes
		changed = true
	}

	if c.DefaultRuntime != newConfig.DefaultRuntime {
		c.DefaultRuntime = newConfig.DefaultRuntime
		if err := c.ValidateDefaultRuntime(); err != nil {
			return fmt.Errorf("unable to reload runtimes: %w", err)
		}
		logConfig("default_runtime", c.DefaultRuntime)
		changed = true
	}

	// Neither the runtimes nor the default runtime changed.
	if !changed {
		return nil
	}

	if err := c.ValidateRuntimes(); err != nil {
		return fmt.Errorf("unable to reload runtimes: %w", err)
	}

	// Re-point every runtime's seccomp notifier at the "seccomp" directory
	// next to the CRI listen socket.
	notifierPath := filepath.Join(filepath.Dir(c.Listen), "seccomp")
	for name := range c.Runtimes {
		if c.Runtimes[name].seccompConfig == nil {
			continue
		}
		c.Runtimes[name].seccompConfig.SetNotifierPath(notifierPath)
	}
	return nil
}
package config
import (
"fmt"
"strings"
)
// NewSysctl creates a new Sysctl instance from the given key and value.
func NewSysctl(key, value string) *Sysctl {
	return &Sysctl{key: key, value: value}
}

// Sysctl is a generic abstraction over key value based sysctls.
type Sysctl struct {
	key, value string
}

// Key returns the key of the sysctl (key=value format).
func (s *Sysctl) Key() string {
	return s.key
}

// Value returns the value of the sysctl (key=value format).
func (s *Sysctl) Value() string {
	return s.value
}
// Sysctls returns the parsed sysctl slice and an error if not parsable
// Some validation based on https://github.com/containers/common/blob/main/pkg/sysctl/sysctl.go
func (c *RuntimeConfig) Sysctls() ([]Sysctl, error) {
	result := make([]Sysctl, 0, len(c.DefaultSysctls))
	for _, entry := range c.DefaultSysctls {
		// skip empty values for sake of backwards compatibility
		if entry == "" {
			continue
		}
		key, value, found := strings.Cut(entry, "=")
		if !found {
			return nil, fmt.Errorf("%q is not in key=value format", entry)
		}
		// pinns nor runc expect sysctls of the form 'key = value', but rather
		// 'key=value', so reject entries carrying extra whitespace.
		if strings.TrimSpace(key)+"="+strings.TrimSpace(value) != entry {
			return nil, fmt.Errorf("'%s' is invalid, extra spaces found: format should be key=value", entry)
		}
		result = append(result, Sysctl{key: key, value: value})
	}
	return result, nil
}
// Namespace represents a kernel namespace name.
type Namespace string

const (
	// IpcNamespace is the Linux IPC namespace.
	IpcNamespace = Namespace("ipc")
	// NetNamespace is the network namespace.
	NetNamespace = Namespace("net")
)

// namespaces maps sysctl keys that must match exactly to the kernel
// namespace confining them; used by Sysctl.Validate.
var namespaces = map[string]Namespace{
	"kernel.sem": IpcNamespace,
}

// prefixNamespaces maps sysctl key prefixes to the kernel namespace
// confining them; matched via strings.HasPrefix in Sysctl.Validate.
var prefixNamespaces = map[string]Namespace{
	"kernel.shm": IpcNamespace,
	"kernel.msg": IpcNamespace,
	"fs.mqueue.": IpcNamespace,
	"net.":       NetNamespace,
}
// Validate checks that a sysctl is whitelisted because it is known to be
// namespaced by the Linux kernel. The parameters hostNet and hostIPC are used
// to forbid sysctls for pod sharing the respective namespaces with the host.
// This check is only used on sysctls defined by the user in the crio.conf
// file.
func (s *Sysctl) Validate(hostNet, hostIPC bool) error {
	const errFmt = "%q not allowed with host %s enabled"
	key := s.Key()

	// Exact-match whitelist first.
	if ns, ok := namespaces[key]; ok {
		if hostIPC && ns == IpcNamespace {
			return fmt.Errorf(errFmt, key, ns)
		}
		return nil
	}

	// Then the prefix-based whitelist.
	for prefix, ns := range prefixNamespaces {
		if !strings.HasPrefix(key, prefix) {
			continue
		}
		if hostIPC && ns == IpcNamespace {
			return fmt.Errorf(errFmt, key, ns)
		}
		if hostNet && ns == NetNamespace {
			return fmt.Errorf(errFmt, key, ns)
		}
		return nil
	}
	return fmt.Errorf("%s not whitelisted", key)
}
package config
import (
"io"
"reflect"
"slices"
"strings"
"text/template"
)
// WriteTemplate write the configuration template to the provided writer.
func (c *Config) WriteTemplate(displayAllConfig bool, w io.Writer) error {
	const templateName = "config"
	// Assemble the raw template text first, then parse and execute it with
	// the config itself as template data.
	text := assembleTemplateString(displayAllConfig, c)
	tpl, err := template.New(templateName).Parse(text)
	if err != nil {
		return err
	}
	return tpl.ExecuteTemplate(w, templateName, c)
}
// assembleTemplateString builds the full configuration template text for the
// given config, section by section, prefixed by the global header when any
// section produced output.
func assembleTemplateString(displayAllConfig bool, c *Config) string {
	crioTemplateConfig, err := initCrioTemplateConfig(c)
	if err != nil {
		// NOTE(review): the error is silently discarded and an empty
		// template returned; callers only ever see the empty string.
		return ""
	}

	// All sections in output order, paired with their header prefix. The
	// [crio] root section has no extra prefix of its own.
	sections := []struct {
		group  templateGroup
		prefix string
	}{
		{crioRootConfig, ""},
		{crioAPIConfig, templateStringCrioAPI},
		{crioRuntimeConfig, templateStringCrioRuntime},
		{crioImageConfig, templateStringCrioImage},
		{crioNetworkConfig, templateStringCrioNetwork},
		{crioMetricsConfig, templateStringCrioMetrics},
		{crioTracingConfig, templateStringCrioTracing},
		{crioNRIConfig, templateStringCrioNRI},
		{crioStatsConfig, templateStringCrioStats},
	}

	var b strings.Builder
	for _, s := range sections {
		b.WriteString(crioTemplateString(s.group, s.prefix, displayAllConfig, crioTemplateConfig))
	}
	if b.Len() == 0 {
		return ""
	}
	return templateStringPrefix + templateStringCrio + b.String()
}
// crioTemplateString renders all template snippets belonging to the given
// group. Snippets carrying a non-default value (or all of them, when
// displayAll is set) are emitted uncommented by stripping the
// "{{ $.Comment }}" placeholder; default-valued snippets keep the
// placeholder so they render as comments. The prefix (usually the section
// header) is prepended only when at least one snippet was emitted.
func crioTemplateString(group templateGroup, prefix string, displayAll bool, crioTemplateConfig []*templateConfigValue) string {
	var sb strings.Builder
	for _, item := range crioTemplateConfig {
		if item.group != group {
			continue
		}
		if displayAll || !item.isDefaultValue {
			sb.WriteString(strings.ReplaceAll(item.templateString, "{{ $.Comment }}", ""))
		} else {
			sb.WriteString(item.templateString)
		}
	}
	if sb.Len() == 0 {
		return ""
	}
	return prefix + sb.String()
}

// templateGroup identifies the configuration section a template snippet
// belongs to.
type templateGroup int32

const (
	crioRootConfig templateGroup = iota + 1
	crioAPIConfig
	crioRuntimeConfig
	crioImageConfig
	crioNetworkConfig
	crioMetricsConfig
	crioTracingConfig
	crioStatsConfig
	crioNRIConfig
)

// templateConfigValue couples a TOML template snippet with its section and
// whether the current config still holds the default value for it.
type templateConfigValue struct {
	templateString string
	group          templateGroup
	isDefaultValue bool
}
// initCrioTemplateConfig builds the list of all template snippets together
// with their section group and whether the provided config still carries the
// default value for that option (compared against a fresh DefaultConfig).
// Returns an error only when building the default config fails.
func initCrioTemplateConfig(c *Config) ([]*templateConfigValue, error) {
	dc, err := DefaultConfig()
	if err != nil {
		return nil, err
	}
	crioTemplateConfig := []*templateConfigValue{
		{
			templateString: templateStringCrioRoot,
			group:          crioRootConfig,
			isDefaultValue: simpleEqual(dc.Root, c.Root),
		},
		{
			templateString: templateStringCrioRunroot,
			group:          crioRootConfig,
			isDefaultValue: simpleEqual(dc.RunRoot, c.RunRoot),
		},
		{
			templateString: templateStringCrioImageStore,
			group:          crioRootConfig,
			isDefaultValue: simpleEqual(dc.ImageStore, c.ImageStore),
		},
		{
			templateString: templateStringCrioStorageDriver,
			group:          crioRootConfig,
			isDefaultValue: simpleEqual(dc.Storage, c.Storage),
		},
		{
			templateString: templateStringCrioStorageOption,
			group:          crioRootConfig,
			isDefaultValue: slices.Equal(dc.StorageOptions, c.StorageOptions),
		},
		{
			templateString: templateStringCrioLogDir,
			group:          crioRootConfig,
			isDefaultValue: simpleEqual(dc.LogDir, c.LogDir),
		},
		{
			templateString: templateStringCrioVersionFile,
			group:          crioRootConfig,
			isDefaultValue: simpleEqual(dc.VersionFile, c.VersionFile),
		},
		{
			templateString: templateStringCrioVersionFilePersist,
			group:          crioRootConfig,
			isDefaultValue: simpleEqual(dc.VersionFilePersist, c.VersionFilePersist),
		},
		{
			templateString: templateStringCrioInternalWipe,
			group:          crioRootConfig,
			isDefaultValue: simpleEqual(dc.InternalWipe, c.InternalWipe),
		},
		{
			templateString: templateStringCrioInternalRepair,
			group:          crioRootConfig,
			isDefaultValue: simpleEqual(dc.InternalRepair, c.InternalRepair),
		},
		{
			templateString: templateStringCrioCleanShutdownFile,
			group:          crioRootConfig,
			isDefaultValue: simpleEqual(dc.CleanShutdownFile, c.CleanShutdownFile),
		},
		{
			templateString: templateStringCrioAPIListen,
			group:          crioAPIConfig,
			isDefaultValue: simpleEqual(dc.Listen, c.Listen),
		},
		{
			templateString: templateStringCrioAPIStreamAddress,
			group:          crioAPIConfig,
			isDefaultValue: simpleEqual(dc.StreamAddress, c.StreamAddress),
		},
		{
			templateString: templateStringCrioAPIStreamPort,
			group:          crioAPIConfig,
			isDefaultValue: simpleEqual(dc.StreamPort, c.StreamPort),
		},
		{
			templateString: templateStringCrioAPIStreamEnableTLS,
			group:          crioAPIConfig,
			isDefaultValue: simpleEqual(dc.StreamEnableTLS, c.StreamEnableTLS),
		},
		{
			templateString: templateStringCrioAPIStreamIdleTimeout,
			group:          crioAPIConfig,
			isDefaultValue: simpleEqual(dc.StreamIdleTimeout, c.StreamIdleTimeout),
		},
		{
			templateString: templateStringCrioAPIStreamTLSCert,
			group:          crioAPIConfig,
			isDefaultValue: simpleEqual(dc.StreamTLSCert, c.StreamTLSCert),
		},
		{
			templateString: templateStringCrioAPIStreamTLSKey,
			group:          crioAPIConfig,
			isDefaultValue: simpleEqual(dc.StreamTLSKey, c.StreamTLSKey),
		},
		{
			templateString: templateStringCrioAPIStreamTLSCa,
			group:          crioAPIConfig,
			isDefaultValue: simpleEqual(dc.StreamTLSCA, c.StreamTLSCA),
		},
		{
			templateString: templateStringCrioAPIGrpcMaxSendMsgSize,
			group:          crioAPIConfig,
			isDefaultValue: simpleEqual(dc.GRPCMaxSendMsgSize, c.GRPCMaxSendMsgSize),
		},
		{
			templateString: templateStringCrioAPIGrpcMaxRecvMsgSize,
			group:          crioAPIConfig,
			isDefaultValue: simpleEqual(dc.GRPCMaxRecvMsgSize, c.GRPCMaxRecvMsgSize),
		},
		{
			templateString: templateStringCrioRuntimeDefaultUlimits,
			group:          crioRuntimeConfig,
			isDefaultValue: slices.Equal(dc.DefaultUlimits, c.DefaultUlimits),
		},
		{
			templateString: templateStringCrioRuntimeNoPivot,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.NoPivot, c.NoPivot),
		},
		{
			templateString: templateStringCrioRuntimeDecryptionKeysPath,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.DecryptionKeysPath, c.DecryptionKeysPath),
		},
		{
			templateString: templateStringCrioRuntimeConmon,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.Conmon, c.Conmon),
		},
		{
			templateString: templateStringCrioRuntimeConmonCgroup,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.ConmonCgroup, c.ConmonCgroup),
		},
		{
			templateString: templateStringCrioRuntimeConmonEnv,
			group:          crioRuntimeConfig,
			isDefaultValue: slices.Equal(dc.ConmonEnv, c.ConmonEnv),
		},
		{
			templateString: templateStringCrioRuntimeDefaultEnv,
			group:          crioRuntimeConfig,
			isDefaultValue: slices.Equal(dc.DefaultEnv, c.DefaultEnv),
		},
		{
			templateString: templateStringCrioRuntimeSelinux,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.SELinux, c.SELinux),
		},
		{
			templateString: templateStringCrioRuntimeSeccompProfile,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.SeccompProfile, c.SeccompProfile),
		},
		{
			templateString: templateStringCrioRuntimePrivilegedSeccompProfile,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.PrivilegedSeccompProfile, c.PrivilegedSeccompProfile),
		},
		{
			templateString: templateStringCrioRuntimeApparmorProfile,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.ApparmorProfile, c.ApparmorProfile),
		},
		{
			templateString: templateStringCrioRuntimeBlockIOConfigFile,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.BlockIOConfigFile, c.BlockIOConfigFile),
		},
		{
			templateString: templateStringCrioRuntimeBlockIOReload,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.BlockIOReload, c.BlockIOReload),
		},
		{
			templateString: templateStringCrioRuntimeIrqBalanceConfigFile,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.IrqBalanceConfigFile, c.IrqBalanceConfigFile),
		},
		{
			templateString: templateStringCrioRuntimeIrqBalanceConfigRestoreFile,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.IrqBalanceConfigRestoreFile, c.IrqBalanceConfigRestoreFile),
		},
		{
			templateString: templateStringCrioRuntimeRdtConfigFile,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.RdtConfigFile, c.RdtConfigFile),
		},
		{
			templateString: templateStringCrioRuntimeCgroupManager,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.CgroupManagerName, c.CgroupManagerName),
		},
		{
			templateString: templateStringCrioRuntimeSeparatePullCgroup,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.SeparatePullCgroup, c.SeparatePullCgroup),
		},
		{
			templateString: templateStringCrioRuntimeDefaultCapabilities,
			group:          crioRuntimeConfig,
			isDefaultValue: slices.Equal(dc.DefaultCapabilities, c.DefaultCapabilities),
		},
		{
			templateString: templateStringCrioRuntimeAddInheritableCapabilities,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.AddInheritableCapabilities, c.AddInheritableCapabilities),
		},
		{
			templateString: templateStringCrioRuntimeDefaultSysctls,
			group:          crioRuntimeConfig,
			isDefaultValue: slices.Equal(dc.DefaultSysctls, c.DefaultSysctls),
		},
		{
			templateString: templateStringCrioRuntimeAllowedDevices,
			group:          crioRuntimeConfig,
			isDefaultValue: slices.Equal(dc.AllowedDevices, c.AllowedDevices),
		},
		{
			templateString: templateStringCrioRuntimeAdditionalDevices,
			group:          crioRuntimeConfig,
			isDefaultValue: slices.Equal(dc.AdditionalDevices, c.AdditionalDevices),
		},
		{
			templateString: templateStringCrioRuntimeCDISpecDirs,
			group:          crioRuntimeConfig,
			isDefaultValue: slices.Equal(dc.CDISpecDirs, c.CDISpecDirs),
		},
		{
			templateString: templateStringCrioRuntimeDeviceOwnershipFromSecurityContext,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.DeviceOwnershipFromSecurityContext, c.DeviceOwnershipFromSecurityContext),
		},
		{
			templateString: templateStringCrioRuntimeHooksDir,
			group:          crioRuntimeConfig,
			isDefaultValue: slices.Equal(dc.HooksDir, c.HooksDir),
		},
		{
			templateString: templateStringCrioRuntimeDefaultMountsFile,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.DefaultMountsFile, c.DefaultMountsFile),
		},
		{
			templateString: templateStringCrioRuntimePidsLimit,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.PidsLimit, c.PidsLimit),
		},
		{
			templateString: templateStringCrioRuntimeLogSizeMax,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.LogSizeMax, c.LogSizeMax),
		},
		{
			templateString: templateStringCrioRuntimeLogToJournald,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.LogToJournald, c.LogToJournald),
		},
		{
			templateString: templateStringCrioRuntimeContainerExitsDir,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.ContainerExitsDir, c.ContainerExitsDir),
		},
		{
			templateString: templateStringCrioRuntimeContainerAttachSocketDir,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.ContainerAttachSocketDir, c.ContainerAttachSocketDir),
		},
		{
			templateString: templateStringCrioRuntimeBindMountPrefix,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.BindMountPrefix, c.BindMountPrefix),
		},
		{
			templateString: templateStringCrioRuntimeReadOnly,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.ReadOnly, c.ReadOnly),
		},
		{
			templateString: templateStringCrioRuntimeLogLevel,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.LogLevel, c.LogLevel),
		},
		{
			templateString: templateStringCrioRuntimeLogFilter,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.LogFilter, c.LogFilter),
		},
		{
			templateString: templateStringCrioRuntimeUIDMappings,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.UIDMappings, c.UIDMappings),
		},
		{
			templateString: templateStringCrioRuntimeGIDMappings,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.GIDMappings, c.GIDMappings),
		},
		{
			templateString: templateStringCrioRuntimeMinimumMappableUID,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.MinimumMappableUID, c.MinimumMappableUID),
		},
		{
			templateString: templateStringCrioRuntimeMinimumMappableGID,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.MinimumMappableGID, c.MinimumMappableGID),
		},
		{
			templateString: templateStringCrioRuntimeCtrStopTimeout,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.CtrStopTimeout, c.CtrStopTimeout),
		},
		{
			templateString: templateStringCrioRuntimeDropInfraCtr,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.DropInfraCtr, c.DropInfraCtr),
		},
		{
			templateString: templateStringCrioRuntimeInfraCtrCpuset,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.InfraCtrCPUSet, c.InfraCtrCPUSet),
		},
		{
			templateString: templateStringCrioRuntimeSharedCpuset,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.SharedCPUSet, c.SharedCPUSet),
		},
		{
			templateString: templateStringCrioRuntimeNamespacesDir,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.NamespacesDir, c.NamespacesDir),
		},
		{
			templateString: templateStringCrioRuntimePinnsPath,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.PinnsPath, c.PinnsPath),
		},
		{
			templateString: templateStringCrioRuntimeEnableCriuSupport,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.EnableCriuSupport, c.EnableCriuSupport),
		},
		{
			templateString: templateStringCrioRuntimeEnablePodEvents,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.EnablePodEvents, c.EnablePodEvents),
		},
		{
			templateString: templateStringCrioRuntimeDefaultRuntime,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.DefaultRuntime, c.DefaultRuntime),
		},
		{
			templateString: templateStringCrioRuntimeAbsentMountSourcesToReject,
			group:          crioRuntimeConfig,
			isDefaultValue: slices.Equal(dc.AbsentMountSourcesToReject, c.AbsentMountSourcesToReject),
		},
		{
			templateString: templateStringCrioRuntimeRuntimesRuntimeHandler,
			group:          crioRuntimeConfig,
			isDefaultValue: RuntimesEqual(dc.Runtimes, c.Runtimes),
		},
		{
			templateString: templateStringCrioRuntimeWorkloads,
			group:          crioRuntimeConfig,
			isDefaultValue: WorkloadsEqual(dc.Workloads, c.Workloads),
		},
		{
			templateString: templateStringCrioRuntimeHostNetworkDisableSELinux,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.HostNetworkDisableSELinux, c.HostNetworkDisableSELinux),
		},
		{
			templateString: templateStringCrioRuntimeDisableHostPortMapping,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.DisableHostPortMapping, c.DisableHostPortMapping),
		},
		{
			templateString: templateStringCrioRuntimeTimezone,
			group:          crioRuntimeConfig,
			isDefaultValue: simpleEqual(dc.Timezone, c.Timezone),
		},
		{
			templateString: templateStringCrioImageDefaultTransport,
			group:          crioImageConfig,
			isDefaultValue: simpleEqual(dc.DefaultTransport, c.DefaultTransport),
		},
		{
			templateString: templateStringCrioImageGlobalAuthFile,
			group:          crioImageConfig,
			isDefaultValue: simpleEqual(dc.GlobalAuthFile, c.GlobalAuthFile),
		},
		{
			templateString: templateStringCrioImagePauseImage,
			group:          crioImageConfig,
			isDefaultValue: simpleEqual(dc.PauseImage, c.PauseImage),
		},
		{
			templateString: templateStringCrioImagePauseImageAuthFile,
			group:          crioImageConfig,
			isDefaultValue: simpleEqual(dc.PauseImageAuthFile, c.PauseImageAuthFile),
		},
		{
			templateString: templateStringCrioImagePauseCommand,
			group:          crioImageConfig,
			isDefaultValue: simpleEqual(dc.PauseCommand, c.PauseCommand),
		},
		{
			templateString: templateStringCrioImagePinnedImages,
			group:          crioImageConfig,
			isDefaultValue: slices.Equal(dc.PinnedImages, c.PinnedImages),
		},
		{
			templateString: templateStringCrioImageSignaturePolicy,
			group:          crioImageConfig,
			isDefaultValue: simpleEqual(dc.SignaturePolicyPath, c.SignaturePolicyPath),
		},
		{
			templateString: templateStringCrioImageSignaturePolicyDir,
			group:          crioImageConfig,
			isDefaultValue: simpleEqual(dc.SignaturePolicyDir, c.SignaturePolicyDir),
		},
		{
			templateString: templateStringCrioImageInsecureRegistries,
			group:          crioImageConfig,
			isDefaultValue: slices.Equal(dc.InsecureRegistries, c.InsecureRegistries),
		},
		{
			templateString: templateStringCrioImageImageVolumes,
			group:          crioImageConfig,
			isDefaultValue: simpleEqual(dc.ImageVolumes, c.ImageVolumes),
		},
		{
			templateString: templateStringCrioImageBigFilesTemporaryDir,
			group:          crioImageConfig,
			isDefaultValue: simpleEqual(dc.BigFilesTemporaryDir, c.BigFilesTemporaryDir),
		},
		{
			templateString: templateStringCrioImageAutoReloadRegistries,
			group:          crioImageConfig,
			isDefaultValue: simpleEqual(dc.AutoReloadRegistries, c.AutoReloadRegistries),
		},
		{
			templateString: templateStringCrioImagePullProgressTimeout,
			group:          crioImageConfig,
			isDefaultValue: simpleEqual(dc.PullProgressTimeout, c.PullProgressTimeout),
		},
		{
			templateString: templateStringCrioImageShortNameMode,
			group:          crioImageConfig,
			isDefaultValue: simpleEqual(dc.ShortNameMode, c.ShortNameMode),
		},
		{
			templateString: templateStringOCIArtifactMountSupport,
			group:          crioImageConfig,
			isDefaultValue: simpleEqual(dc.OCIArtifactMountSupport, c.OCIArtifactMountSupport),
		},
		{
			templateString: templateStringCrioNetworkCniDefaultNetwork,
			group:          crioNetworkConfig,
			isDefaultValue: simpleEqual(dc.CNIDefaultNetwork, c.CNIDefaultNetwork),
		},
		{
			templateString: templateStringCrioNetworkNetworkDir,
			group:          crioNetworkConfig,
			isDefaultValue: simpleEqual(dc.NetworkDir, c.NetworkDir),
		},
		{
			templateString: templateStringCrioNetworkPluginDirs,
			group:          crioNetworkConfig,
			isDefaultValue: slices.Equal(dc.PluginDirs, c.PluginDirs),
		},
		{
			templateString: templateStringCrioMetricsEnableMetrics,
			group:          crioMetricsConfig,
			isDefaultValue: simpleEqual(dc.EnableMetrics, c.EnableMetrics),
		},
		{
			templateString: templateStringCrioMetricsCollectors,
			group:          crioMetricsConfig,
			isDefaultValue: slices.Equal(dc.MetricsCollectors.ToSlice(), c.MetricsCollectors.ToSlice()),
		},
		{
			templateString: templateStringCrioMetricsMetricsHost,
			group:          crioMetricsConfig,
			isDefaultValue: simpleEqual(dc.MetricsHost, c.MetricsHost),
		},
		{
			templateString: templateStringCrioMetricsMetricsPort,
			group:          crioMetricsConfig,
			isDefaultValue: simpleEqual(dc.MetricsPort, c.MetricsPort),
		},
		{
			templateString: templateStringCrioMetricsMetricsSocket,
			group:          crioMetricsConfig,
			isDefaultValue: simpleEqual(dc.MetricsSocket, c.MetricsSocket),
		},
		{
			templateString: templateStringCrioMetricsMetricsCert,
			group:          crioMetricsConfig,
			isDefaultValue: simpleEqual(dc.MetricsCert, c.MetricsCert),
		},
		{
			templateString: templateStringCrioMetricsMetricsKey,
			group:          crioMetricsConfig,
			isDefaultValue: simpleEqual(dc.MetricsKey, c.MetricsKey),
		},
		{
			templateString: templateStringCrioTracingEnableTracing,
			group:          crioTracingConfig,
			isDefaultValue: simpleEqual(dc.EnableTracing, c.EnableTracing),
		},
		{
			templateString: templateStringCrioTracingTracingEndpoint,
			group:          crioTracingConfig,
			isDefaultValue: simpleEqual(dc.TracingEndpoint, c.TracingEndpoint),
		},
		{
			templateString: templateStringCrioTracingTracingSamplingRatePerMillion,
			group:          crioTracingConfig,
			isDefaultValue: simpleEqual(dc.TracingSamplingRatePerMillion, c.TracingSamplingRatePerMillion),
		},
		{
			templateString: templateStringCrioStatsStatsCollectionPeriod,
			group:          crioStatsConfig,
			isDefaultValue: simpleEqual(dc.StatsCollectionPeriod, c.StatsCollectionPeriod),
		},
		{
			templateString: templateStringCrioStatsCollectionPeriod,
			group:          crioStatsConfig,
			isDefaultValue: simpleEqual(dc.CollectionPeriod, c.CollectionPeriod),
		},
		{
			// Fix: this stats snippet was grouped under crioNetworkConfig,
			// which rendered included_pod_metrics inside [crio.network]
			// instead of [crio.stats].
			templateString: templateStringCrioStatsIncludedPodMetrics,
			group:          crioStatsConfig,
			isDefaultValue: slices.Equal(dc.IncludedPodMetrics, c.IncludedPodMetrics),
		},
		{
			templateString: templateStringCrioNRIEnable,
			group:          crioNRIConfig,
			isDefaultValue: simpleEqual(dc.NRI.Enabled, c.NRI.Enabled),
		},
		{
			templateString: templateStringCrioNRISocketPath,
			group:          crioNRIConfig,
			isDefaultValue: simpleEqual(dc.NRI.SocketPath, c.NRI.SocketPath),
		},
		{
			templateString: templateStringCrioNRIPluginDir,
			group:          crioNRIConfig,
			isDefaultValue: simpleEqual(dc.NRI.PluginPath, c.NRI.PluginPath),
		},
		{
			// NOTE(review): this compares NRI.PluginPath, the same field as
			// the PluginDir entry above — it likely should compare the NRI
			// plugin *config* directory field; confirm against the NRI
			// config struct.
			templateString: templateStringCrioNRIPluginConfigDir,
			group:          crioNRIConfig,
			isDefaultValue: simpleEqual(dc.NRI.PluginPath, c.NRI.PluginPath),
		},
		{
			templateString: templateStringCrioNRIDisableConnections,
			group:          crioNRIConfig,
			isDefaultValue: simpleEqual(dc.NRI.DisableConnections, c.NRI.DisableConnections),
		},
		{
			templateString: templateStringCrioNRIPluginRegistrationTimeout,
			group:          crioNRIConfig,
			isDefaultValue: simpleEqual(dc.NRI.PluginRegistrationTimeout, c.NRI.PluginRegistrationTimeout),
		},
		{
			templateString: templateStringCrioNRIPluginRequestTimeout,
			group:          crioNRIConfig,
			isDefaultValue: simpleEqual(dc.NRI.PluginRequestTimeout, c.NRI.PluginRequestTimeout),
		},
		{
			templateString: templateStringCrioNRIDefaultValidator,
			group:          crioNRIConfig,
			isDefaultValue: dc.NRI.IsDefaultValidatorDefaultConfig(),
		},
	}
	return crioTemplateConfig, nil
}
// simpleEqual reports whether the two values compare equal via the built-in
// == operator on `any` (equal dynamic type and equal value).
func simpleEqual(a, b any) bool {
	equal := a == b
	return equal
}
// RuntimesEqual reports whether the two runtime maps contain the same
// handler names mapped to deeply equal runtime definitions.
func RuntimesEqual(a, b Runtimes) bool {
	if len(a) != len(b) {
		return false
	}
	for name, handler := range a {
		other, exists := b[name]
		if !exists || !reflect.DeepEqual(handler, other) {
			return false
		}
	}
	return true
}
// WorkloadsEqual reports whether the two workload maps contain the same
// names mapped to deeply equal workload definitions.
func WorkloadsEqual(a, b Workloads) bool {
	if len(a) != len(b) {
		return false
	}
	for name, workload := range a {
		other, exists := b[name]
		if !exists || !reflect.DeepEqual(workload, other) {
			return false
		}
	}
	return true
}
const templateStringPrefix = `# The CRI-O configuration file specifies all of the available configuration
# options and command-line flags for the crio(8) OCI Kubernetes Container Runtime
# daemon, but in a TOML format that can be more easily modified and versioned.
#
# Please refer to crio.conf(5) for details of all configuration options.
# CRI-O supports partial configuration reload during runtime, which can be
# done by sending SIGHUP to the running process. Currently supported options
# are explicitly mentioned with: 'This option supports live configuration
# reload'.
# CRI-O reads its storage defaults from the containers-storage.conf(5) file
# located at /etc/containers/storage.conf. Modify this storage configuration if
# you want to change the system's defaults. If you want to modify storage just
# for CRI-O, you can change the storage configuration options here.
`
const templateStringCrio = `[crio]
`
const templateStringCrioRoot = `# Path to the "root directory". CRI-O stores all of its data, including
# containers images, in this directory.
{{ $.Comment }}root = "{{ .Root }}"
`
const templateStringCrioRunroot = `# Path to the "run directory". CRI-O stores all of its state in this directory.
{{ $.Comment }}runroot = "{{ .RunRoot }}"
`
const templateStringCrioImageStore = `# Path to the "imagestore". If CRI-O stores all of its images in this directory differently than Root.
{{ $.Comment }}imagestore = "{{ .ImageStore }}"
`
const templateStringCrioStorageDriver = `# Storage driver used to manage the storage of images and containers. Please
# refer to containers-storage.conf(5) to see all available storage drivers.
{{ $.Comment }}storage_driver = "{{ .Storage }}"
`
const templateStringCrioStorageOption = `# List to pass options to the storage driver. Please refer to
# containers-storage.conf(5) to see all available storage options.
{{ $.Comment }}storage_option = [
{{ range $opt := .StorageOptions }}{{ $.Comment }}{{ printf "\t%q,\n" $opt }}{{ end }}{{ $.Comment }}]
`
const templateStringCrioLogDir = `# The default log directory where all logs will go unless directly specified by
# the kubelet. The log directory specified must be an absolute directory.
{{ $.Comment }}log_dir = "{{ .LogDir }}"
`
const templateStringCrioVersionFile = `# Location for CRI-O to lay down the temporary version file.
# It is used to check if crio wipe should wipe containers, which should
# always happen on a node reboot
{{ $.Comment }}version_file = "{{ .VersionFile }}"
`
const templateStringCrioVersionFilePersist = `# Location for CRI-O to lay down the persistent version file.
# It is used to check if crio wipe should wipe images, which should
# only happen when CRI-O has been upgraded
{{ $.Comment }}version_file_persist = "{{ .VersionFilePersist }}"
`
const templateStringCrioCleanShutdownFile = `# Location for CRI-O to lay down the clean shutdown file.
# It is used to check whether crio had time to sync before shutting down.
# If not found, crio wipe will clear the storage directory.
{{ $.Comment }}clean_shutdown_file = "{{ .CleanShutdownFile }}"
`
const templateStringCrioInternalWipe = `# InternalWipe is whether CRI-O should wipe containers and images after a reboot when the server starts.
# If set to false, one must use the external command 'crio wipe' to wipe the containers and images in these situations.
{{ $.Comment }}internal_wipe = {{ .InternalWipe }}
`
const templateStringCrioInternalRepair = `# InternalRepair is whether CRI-O should check if the container and image storage was corrupted after a sudden restart.
# If it was, CRI-O also attempts to repair the storage.
{{ $.Comment }}internal_repair = {{ .InternalRepair }}
`
const templateStringOCIArtifactMountSupport = `# OCIArtifactMountSupport is whether CRI-O should support OCI artifacts.
# If set to false, mounting OCI Artifacts will result in an error.
{{ $.Comment }}oci_artifact_mount_support = {{ .OCIArtifactMountSupport }}
`
// templateStringCrioAPI opens the [crio.api] TOML table.
const templateStringCrioAPI = `# The crio.api table contains settings for the kubelet/gRPC interface.
[crio.api]
`
// templateStringCrioAPIListen is the config template fragment for the
// `listen` option (path of the AF_LOCAL socket).
const templateStringCrioAPIListen = `# Path to AF_LOCAL socket on which CRI-O will listen.
{{ $.Comment }}listen = "{{ .Listen }}"
`
// templateStringCrioAPIStreamAddress is the config template fragment for the
// `stream_address` option.
const templateStringCrioAPIStreamAddress = `# IP address on which the stream server will listen.
{{ $.Comment }}stream_address = "{{ .StreamAddress }}"
`
// templateStringCrioAPIStreamPort is the config template fragment for the
// `stream_port` option. Note the value is rendered as a quoted string.
const templateStringCrioAPIStreamPort = `# The port on which the stream server will listen. If the port is set to "0", then
# CRI-O will allocate a random free port number.
{{ $.Comment }}stream_port = "{{ .StreamPort }}"
`
// templateStringCrioAPIStreamEnableTLS is the config template fragment for
// the `stream_enable_tls` option.
const templateStringCrioAPIStreamEnableTLS = `# Enable encrypted TLS transport of the stream server.
{{ $.Comment }}stream_enable_tls = {{ .StreamEnableTLS }}
`
// templateStringCrioAPIStreamIdleTimeout is the config template fragment for
// the `stream_idle_timeout` option.
const templateStringCrioAPIStreamIdleTimeout = `# Length of time until open streams terminate due to lack of activity
{{ $.Comment }}stream_idle_timeout = "{{.StreamIdleTimeout}}"
`
// templateStringCrioAPIStreamTLSCert is the config template fragment for the
// `stream_tls_cert` option.
const templateStringCrioAPIStreamTLSCert = `# Path to the x509 certificate file used to serve the encrypted stream. This
# file can change, and CRI-O will automatically pick up the changes.
{{ $.Comment }}stream_tls_cert = "{{ .StreamTLSCert }}"
`
// templateStringCrioAPIStreamTLSKey is the config template fragment for the
// `stream_tls_key` option.
const templateStringCrioAPIStreamTLSKey = `# Path to the key file used to serve the encrypted stream. This file can
# change and CRI-O will automatically pick up the changes.
{{ $.Comment }}stream_tls_key = "{{ .StreamTLSKey }}"
`
// templateStringCrioAPIStreamTLSCa is the config template fragment for the
// `stream_tls_ca` option.
const templateStringCrioAPIStreamTLSCa = `# Path to the x509 CA(s) file used to verify and authenticate client
# communication with the encrypted stream. This file can change and CRI-O will
# automatically pick up the changes.
{{ $.Comment }}stream_tls_ca = "{{ .StreamTLSCA }}"
`
// templateStringCrioAPIGrpcMaxSendMsgSize is the config template fragment for
// the `grpc_max_send_msg_size` option.
const templateStringCrioAPIGrpcMaxSendMsgSize = `# Maximum grpc send message size in bytes. If not set or <=0, then CRI-O will default to 80 * 1024 * 1024.
{{ $.Comment }}grpc_max_send_msg_size = {{ .GRPCMaxSendMsgSize }}
`
// templateStringCrioAPIGrpcMaxRecvMsgSize is the config template fragment for
// the `grpc_max_recv_msg_size` option.
const templateStringCrioAPIGrpcMaxRecvMsgSize = `# Maximum grpc receive message size. If not set or <= 0, then CRI-O will default to 80 * 1024 * 1024.
{{ $.Comment }}grpc_max_recv_msg_size = {{ .GRPCMaxRecvMsgSize }}
`
// templateStringCrioRuntime opens the [crio.runtime] TOML table.
const templateStringCrioRuntime = `# The crio.runtime table contains settings pertaining to the OCI runtime used
# and options for how to set up and manage the OCI runtime.
[crio.runtime]
`
// templateStringCrioRuntimeDefaultUlimits is the config template fragment for
// the `default_ulimits` list; each entry is rendered tab-indented and quoted.
const templateStringCrioRuntimeDefaultUlimits = `# A list of ulimits to be set in containers by default, specified as
# "<ulimit name>=<soft limit>:<hard limit>", for example:
# "nofile=1024:2048"
# If nothing is set here, settings will be inherited from the CRI-O daemon
{{ $.Comment }}default_ulimits = [
{{ range $ulimit := .DefaultUlimits }}{{ $.Comment }}{{ printf "\t%q,\n" $ulimit }}{{ end }}{{ $.Comment }}]
`
// templateStringCrioRuntimeNoPivot is the config template fragment for the
// `no_pivot` option.
const templateStringCrioRuntimeNoPivot = `# If true, the runtime will not use pivot_root, but instead use MS_MOVE.
{{ $.Comment }}no_pivot = {{ .NoPivot }}
`
// templateStringCrioRuntimeDecryptionKeysPath is the config template fragment
// for the `decryption_keys_path` option.
const templateStringCrioRuntimeDecryptionKeysPath = `# decryption_keys_path is the path where the keys required for
# image decryption are stored. This option supports live configuration reload.
{{ $.Comment }}decryption_keys_path = "{{ .DecryptionKeysPath }}"
`
// templateStringCrioRuntimeConmon is the config template fragment for the
// deprecated `conmon` option.
const templateStringCrioRuntimeConmon = `# Path to the conmon binary, used for monitoring the OCI runtime.
# Will be searched for using $PATH if empty.
# This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv.
{{ $.Comment }}conmon = "{{ .Conmon }}"
`
// templateStringCrioRuntimeConmonCgroup is the config template fragment for
// the deprecated `conmon_cgroup` option.
const templateStringCrioRuntimeConmonCgroup = `# Cgroup setting for conmon
# This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorCgroup.
{{ $.Comment }}conmon_cgroup = "{{ .ConmonCgroup }}"
`
// templateStringCrioRuntimeConmonEnv is the config template fragment for the
// deprecated `conmon_env` list.
const templateStringCrioRuntimeConmonEnv = `# Environment variable list for the conmon process, used for passing necessary
# environment variables to conmon or the runtime.
# This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv.
{{ $.Comment }}conmon_env = [
{{ range $env := .ConmonEnv }}{{ $.Comment }}{{ printf "\t%q,\n" $env }}{{ end }}{{ $.Comment }}]
`
// templateStringCrioRuntimeDefaultEnv is the config template fragment for the
// `default_env` list.
const templateStringCrioRuntimeDefaultEnv = `# Additional environment variables to set for all the
# containers. These are overridden if set in the
# container image spec or in the container runtime configuration.
{{ $.Comment }}default_env = [
{{ range $env := .DefaultEnv }}{{ $.Comment }}{{ printf "\t%q,\n" $env }}{{ end }}{{ $.Comment }}]
`
// templateStringCrioRuntimeSelinux is the config template fragment for the
// deprecated `selinux` option.
const templateStringCrioRuntimeSelinux = `# If true, SELinux will be used for pod separation on the host.
# This option is deprecated, and be interpreted from whether SELinux is enabled on the host in the future.
{{ $.Comment }}selinux = {{ .SELinux }}
`
// templateStringCrioRuntimeSeccompProfile is the config template fragment for
// the `seccomp_profile` option.
const templateStringCrioRuntimeSeccompProfile = `# Path to the seccomp.json profile which is used as the default seccomp profile
# for the runtime. If not specified or set to "", then the internal default seccomp profile will be used.
# This option supports live configuration reload.
{{ $.Comment }}seccomp_profile = "{{ .SeccompProfile }}"
`
// templateStringCrioRuntimePrivilegedSeccompProfile is the config template
// fragment for the `privileged_seccomp_profile` option.
const templateStringCrioRuntimePrivilegedSeccompProfile = `# Enable a seccomp profile for privileged containers from the local path.
# This option supports live configuration reload.
{{ $.Comment }}privileged_seccomp_profile = "{{ .PrivilegedSeccompProfile }}"
`
// templateStringCrioRuntimeApparmorProfile is the config template fragment
// for the `apparmor_profile` option.
const templateStringCrioRuntimeApparmorProfile = `# Used to change the name of the default AppArmor profile of CRI-O. The default
# profile name is "crio-default". This profile only takes effect if the user
# does not specify a profile via the Kubernetes Pod's metadata annotation. If
# the profile is set to "unconfined", then this equals to disabling AppArmor.
# This option supports live configuration reload.
{{ $.Comment }}apparmor_profile = "{{ .ApparmorProfile }}"
`
// templateStringCrioRuntimeBlockIOConfigFile is the config template fragment
// for the `blockio_config_file` option.
const templateStringCrioRuntimeBlockIOConfigFile = `# Path to the blockio class configuration file for configuring
# the cgroup blockio controller.
{{ $.Comment }}blockio_config_file = "{{ .BlockIOConfigFile }}"
`
// templateStringCrioRuntimeBlockIOReload is the config template fragment for
// the `blockio_reload` option.
const templateStringCrioRuntimeBlockIOReload = `# Reload blockio-config-file and rescan blockio devices in the system before applying
# blockio parameters.
{{ $.Comment }}blockio_reload = {{ .BlockIOReload }}
`
// templateStringCrioRuntimeIrqBalanceConfigFile is the config template
// fragment for the `irqbalance_config_file` option.
const templateStringCrioRuntimeIrqBalanceConfigFile = `# Used to change irqbalance service config file path which is used for configuring
# irqbalance daemon.
{{ $.Comment }}irqbalance_config_file = "{{ .IrqBalanceConfigFile }}"
`
// templateStringCrioRuntimeRdtConfigFile is the config template fragment for
// the `rdt_config_file` option.
const templateStringCrioRuntimeRdtConfigFile = `# Path to the RDT configuration file for configuring the resctrl pseudo-filesystem.
# This option supports live configuration reload.
{{ $.Comment }}rdt_config_file = "{{ .RdtConfigFile }}"
`
// templateStringCrioRuntimeCgroupManager is the config template fragment for
// the `cgroup_manager` option.
const templateStringCrioRuntimeCgroupManager = `# Cgroup management implementation used for the runtime.
{{ $.Comment }}cgroup_manager = "{{ .CgroupManagerName }}"
`
// templateStringCrioRuntimeSeparatePullCgroup is the config template fragment
// for the `separate_pull_cgroup` option.
const templateStringCrioRuntimeSeparatePullCgroup = `# Specify whether the image pull must be performed in a separate cgroup.
{{ $.Comment }}separate_pull_cgroup = "{{ .SeparatePullCgroup }}"
`
// templateStringCrioRuntimeDefaultCapabilities is the config template
// fragment for the `default_capabilities` list.
const templateStringCrioRuntimeDefaultCapabilities = `# List of default capabilities for containers. If it is empty or commented out,
# only the capabilities defined in the containers json file by the user/kube
# will be added.
{{ $.Comment }}default_capabilities = [
{{ range $capability := .DefaultCapabilities}}{{ $.Comment }}{{ printf "\t%q,\n" $capability}}{{ end }}{{ $.Comment }}]
`
// templateStringCrioRuntimeAddInheritableCapabilities is the config template
// fragment for the `add_inheritable_capabilities` option.
const templateStringCrioRuntimeAddInheritableCapabilities = `# Add capabilities to the inheritable set, as well as the default group of permitted, bounding and effective.
# If capabilities are expected to work for non-root users, this option should be set.
{{ $.Comment }}add_inheritable_capabilities = {{ .AddInheritableCapabilities }}
`
// templateStringCrioRuntimeDefaultSysctls is the config template fragment for
// the `default_sysctls` list.
const templateStringCrioRuntimeDefaultSysctls = `# List of default sysctls. If it is empty or commented out, only the sysctls
# defined in the container json file by the user/kube will be added.
{{ $.Comment }}default_sysctls = [
{{ range $sysctl := .DefaultSysctls}}{{ $.Comment }}{{ printf "\t%q,\n" $sysctl}}{{ end }}{{ $.Comment }}]
`
// templateStringCrioRuntimeAllowedDevices is the config template fragment for
// the `allowed_devices` list.
const templateStringCrioRuntimeAllowedDevices = `# List of devices on the host that a
# user can specify with the "io.kubernetes.cri-o.Devices" allowed annotation.
{{ $.Comment }}allowed_devices = [
{{ range $device := .AllowedDevices}}{{ $.Comment }}{{ printf "\t%q,\n" $device}}{{ end }}{{ $.Comment }}]
`
// templateStringCrioRuntimeAdditionalDevices is the config template fragment
// for the `additional_devices` list.
const templateStringCrioRuntimeAdditionalDevices = `# List of additional devices. specified as
# "<device-on-host>:<device-on-container>:<permissions>", for example: "--device=/dev/sdc:/dev/xvdc:rwm".
# If it is empty or commented out, only the devices
# defined in the container json file by the user/kube will be added.
{{ $.Comment }}additional_devices = [
{{ range $device := .AdditionalDevices}}{{ $.Comment }}{{ printf "\t%q,\n" $device}}{{ end }}{{ $.Comment }}]
`
// templateStringCrioRuntimeCDISpecDirs is the config template fragment for
// the `cdi_spec_dirs` list.
const templateStringCrioRuntimeCDISpecDirs = `# List of directories to scan for CDI Spec files.
{{ $.Comment }}cdi_spec_dirs = [
{{ range $dir := .CDISpecDirs }}{{ $.Comment }}{{ printf "\t%q,\n" $dir}}{{ end }}{{ $.Comment }}]
`
// templateStringCrioRuntimeDeviceOwnershipFromSecurityContext is the config
// template fragment for the `device_ownership_from_security_context` option.
const templateStringCrioRuntimeDeviceOwnershipFromSecurityContext = `# Change the default behavior of setting container devices uid/gid from CRI's
# SecurityContext (RunAsUser/RunAsGroup) instead of taking host's uid/gid.
# Defaults to false.
{{ $.Comment }}device_ownership_from_security_context = {{ .DeviceOwnershipFromSecurityContext }}
`
// templateStringCrioRuntimeHooksDir is the config template fragment for the
// `hooks_dir` list.
const templateStringCrioRuntimeHooksDir = `# Path to OCI hooks directories for automatically executed hooks. If one of the
# directories does not exist, then CRI-O will automatically skip them.
{{ $.Comment }}hooks_dir = [
{{ range $hooksDir := .HooksDir }}{{ $.Comment }}{{ printf "\t%q,\n" $hooksDir}}{{ end }}{{ $.Comment }}]
`
// templateStringCrioRuntimeDefaultMountsFile is the config template fragment
// for the `default_mounts_file` option.
const templateStringCrioRuntimeDefaultMountsFile = `# Path to the file specifying the defaults mounts for each container. The
# format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads
# its default mounts from the following two files:
#
# 1) /etc/containers/mounts.conf (i.e., default_mounts_file): This is the
# override file, where users can either add in their own default mounts, or
# override the default mounts shipped with the package.
#
# 2) /usr/share/containers/mounts.conf: This is the default file read for
# mounts. If you want CRI-O to read from a different, specific mounts file,
# you can change the default_mounts_file. Note, if this is done, CRI-O will
# only add mounts it finds in this file.
#
{{ $.Comment }}default_mounts_file = "{{ .DefaultMountsFile }}"
`
// templateStringCrioRuntimePidsLimit is the config template fragment for the
// deprecated `pids_limit` option.
const templateStringCrioRuntimePidsLimit = `# Maximum number of processes allowed in a container.
# This option is deprecated. The Kubelet flag '--pod-pids-limit' should be used instead.
{{ $.Comment }}pids_limit = {{ .PidsLimit }}
`
// templateStringCrioRuntimeLogSizeMax is the config template fragment for the
// deprecated `log_size_max` option.
const templateStringCrioRuntimeLogSizeMax = `# Maximum sized allowed for the container log file. Negative numbers indicate
# that no size limit is imposed. If it is positive, it must be >= 8192 to
# match/exceed conmon's read buffer. The file is truncated and re-opened so the
# limit is never exceeded. This option is deprecated. The Kubelet flag '--container-log-max-size' should be used instead.
{{ $.Comment }}log_size_max = {{ .LogSizeMax }}
`
// templateStringCrioRuntimeLogToJournald is the config template fragment for
// the `log_to_journald` option.
const templateStringCrioRuntimeLogToJournald = `# Whether container output should be logged to journald in addition to the kubernetes log file
{{ $.Comment }}log_to_journald = {{ .LogToJournald }}
`
// templateStringCrioRuntimeContainerExitsDir is the config template fragment
// for the `container_exits_dir` option.
const templateStringCrioRuntimeContainerExitsDir = `# Path to directory in which container exit files are written to by conmon.
{{ $.Comment }}container_exits_dir = "{{ .ContainerExitsDir }}"
`
// templateStringCrioRuntimeContainerAttachSocketDir is the config template
// fragment for the `container_attach_socket_dir` option.
const templateStringCrioRuntimeContainerAttachSocketDir = `# Path to directory for container attach sockets.
{{ $.Comment }}container_attach_socket_dir = "{{ .ContainerAttachSocketDir }}"
`
const templateStringCrioRuntimeBindMountPrefix = `# The prefix to use for the source of the bind mounts.
{{ $.Comment }}bind_mount_prefix = ""
`
// templateStringCrioRuntimeReadOnly is the config template fragment for the
// `read_only` option.
const templateStringCrioRuntimeReadOnly = `# If set to true, all containers will run in read-only mode.
{{ $.Comment }}read_only = {{ .ReadOnly }}
`
// templateStringCrioRuntimeLogLevel is the config template fragment for the
// `log_level` option.
const templateStringCrioRuntimeLogLevel = `# Changes the verbosity of the logs based on the level it is set to. Options
# are fatal, panic, error, warn, info, debug and trace. This option supports
# live configuration reload.
{{ $.Comment }}log_level = "{{ .LogLevel }}"
`
// templateStringCrioRuntimeLogFilter is the config template fragment for the
// `log_filter` option.
const templateStringCrioRuntimeLogFilter = `# Filter the log messages by the provided regular expression.
# This option supports live configuration reload.
{{ $.Comment }}log_filter = "{{ .LogFilter }}"
`
// templateStringCrioRuntimeUIDMappings is the config template fragment for
// the deprecated `uid_mappings` option.
const templateStringCrioRuntimeUIDMappings = `# The UID mappings for the user namespace of each container. A range is
# specified in the form containerUID:HostUID:Size. Multiple ranges must be
# separated by comma.
# This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future.
{{ $.Comment }}uid_mappings = "{{ .UIDMappings }}"
`
// templateStringCrioRuntimeGIDMappings is the config template fragment for
// the deprecated `gid_mappings` option.
const templateStringCrioRuntimeGIDMappings = `# The GID mappings for the user namespace of each container. A range is
# specified in the form containerGID:HostGID:Size. Multiple ranges must be
# separated by comma.
# This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future.
{{ $.Comment }}gid_mappings = "{{ .GIDMappings }}"
`
// templateStringCrioRuntimeMinimumMappableUID is the config template fragment
// for the deprecated `minimum_mappable_uid` option.
const templateStringCrioRuntimeMinimumMappableUID = `# If set, CRI-O will reject any attempt to map host UIDs below this value
# into user namespaces. A negative value indicates that no minimum is set,
# so specifying mappings will only be allowed for pods that run as UID 0.
# This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future.
{{ $.Comment }}minimum_mappable_uid = {{ .MinimumMappableUID }}
`
// templateStringCrioRuntimeMinimumMappableGID is the config template fragment
// for the deprecated `minimum_mappable_gid` option.
const templateStringCrioRuntimeMinimumMappableGID = `# If set, CRI-O will reject any attempt to map host GIDs below this value
# into user namespaces. A negative value indicates that no minimum is set,
# so specifying mappings will only be allowed for pods that run as UID 0.
# This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future.
{{ $.Comment }}minimum_mappable_gid = {{ .MinimumMappableGID}}
`
// templateStringCrioRuntimeCtrStopTimeout is the config template fragment for
// the `ctr_stop_timeout` option.
const templateStringCrioRuntimeCtrStopTimeout = `# The minimal amount of time in seconds to wait before issuing a timeout
# regarding the proper termination of the container. The lowest possible
# value is 30s, whereas lower values are not considered by CRI-O.
{{ $.Comment }}ctr_stop_timeout = {{ .CtrStopTimeout }}
`
// templateStringCrioRuntimeDropInfraCtr is the config template fragment for
// the `drop_infra_ctr` option.
const templateStringCrioRuntimeDropInfraCtr = `# drop_infra_ctr determines whether CRI-O drops the infra container
# when a pod does not have a private PID namespace, and does not use
# a kernel separating runtime (like kata).
# It requires manage_ns_lifecycle to be true.
{{ $.Comment }}drop_infra_ctr = {{ .DropInfraCtr }}
`
// templateStringCrioRuntimeIrqBalanceConfigRestoreFile is the config template
// fragment for the `irqbalance_config_restore_file` option.
const templateStringCrioRuntimeIrqBalanceConfigRestoreFile = `# irqbalance_config_restore_file allows to set a cpu mask CRI-O should
# restore as irqbalance config at startup. Set to empty string to disable this flow entirely.
# By default, CRI-O manages the irqbalance configuration to enable dynamic IRQ pinning.
{{ $.Comment }}irqbalance_config_restore_file = "{{ .IrqBalanceConfigRestoreFile }}"
`
// templateStringCrioRuntimeInfraCtrCpuset is the config template fragment for
// the `infra_ctr_cpuset` option.
const templateStringCrioRuntimeInfraCtrCpuset = `# infra_ctr_cpuset determines what CPUs will be used to run infra containers.
# You can use linux CPU list format to specify desired CPUs.
# To get better isolation for guaranteed pods, set this parameter to be equal to kubelet reserved-cpus.
{{ $.Comment }}infra_ctr_cpuset = "{{ .InfraCtrCPUSet }}"
`
// templateStringCrioRuntimeSharedCpuset is the config template fragment for
// the `shared_cpuset` option.
const templateStringCrioRuntimeSharedCpuset = `# shared_cpuset determines the CPU set which is allowed to be shared between guaranteed containers,
# regardless of, and in addition to, the exclusiveness of their CPUs.
# This field is optional and would not be used if not specified.
# You can specify CPUs in the Linux CPU list format.
{{ $.Comment }}shared_cpuset = "{{ .SharedCPUSet }}"
`
// templateStringCrioRuntimeNamespacesDir is the config template fragment for
// the `namespaces_dir` option.
const templateStringCrioRuntimeNamespacesDir = `# The directory where the state of the managed namespaces gets tracked.
# Only used when manage_ns_lifecycle is true.
{{ $.Comment }}namespaces_dir = "{{ .NamespacesDir }}"
`
// templateStringCrioRuntimePinnsPath is the config template fragment for the
// `pinns_path` option.
const templateStringCrioRuntimePinnsPath = `# pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle
{{ $.Comment }}pinns_path = "{{ .PinnsPath }}"
`
// templateStringCrioRuntimeEnableCriuSupport is the config template fragment
// for the `enable_criu_support` option.
const templateStringCrioRuntimeEnableCriuSupport = `# Globally enable/disable CRIU support which is necessary to
# checkpoint and restore container or pods (even if CRIU is found in $PATH).
{{ $.Comment }}enable_criu_support = {{ .EnableCriuSupport }}
`
// templateStringCrioRuntimeEnablePodEvents is the config template fragment
// for the `enable_pod_events` option.
const templateStringCrioRuntimeEnablePodEvents = `# Enable/disable the generation of the container,
# sandbox lifecycle events to be sent to the Kubelet to optimize the PLEG
{{ $.Comment }}enable_pod_events = {{ .EnablePodEvents }}
`
// templateStringCrioRuntimeDefaultRuntime is the config template fragment for
// the `default_runtime` option.
const templateStringCrioRuntimeDefaultRuntime = `# default_runtime is the _name_ of the OCI runtime to be used as the default.
# The name is matched against the runtimes map below.
{{ $.Comment }}default_runtime = "{{ .DefaultRuntime }}"
`
// templateStringCrioRuntimeAbsentMountSourcesToReject is the config template
// fragment for the `absent_mount_sources_to_reject` list.
const templateStringCrioRuntimeAbsentMountSourcesToReject = `# A list of paths that, when absent from the host,
# will cause a container creation to fail (as opposed to the current behavior being created as a directory).
# This option is to protect from source locations whose existence as a directory could jeopardize the health of the node, and whose
# creation as a file is not desired either.
# An example is /etc/hostname, which will cause failures on reboot if it's created as a directory, but often doesn't exist because
# the hostname is being managed dynamically.
{{ $.Comment }}absent_mount_sources_to_reject = [
{{ range $mount := .AbsentMountSourcesToReject}}{{ $.Comment }}{{ printf "\t%q,\n" $mount}}{{ end }}{{ $.Comment }}]
`
// templateStringCrioRuntimeRuntimesRuntimeHandler is the config template
// fragment for the [crio.runtime.runtimes.*] tables. It iterates over the
// Runtimes map, emitting one table per runtime handler; monitor_env,
// allowed_annotations, platform_runtime_paths and default_annotations are
// only rendered when non-empty.
const templateStringCrioRuntimeRuntimesRuntimeHandler = `# The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes.
# The runtime to use is picked based on the runtime handler provided by the CRI.
# If no runtime handler is provided, the "default_runtime" will be used.
# Each entry in the table should follow the format:
#
# [crio.runtime.runtimes.runtime-handler]
# runtime_path = "/path/to/the/executable"
# runtime_type = "oci"
# runtime_root = "/path/to/the/root"
# inherit_default_runtime = false
# monitor_path = "/path/to/container/monitor"
# monitor_cgroup = "/cgroup/path"
# monitor_exec_cgroup = "/cgroup/path"
# monitor_env = []
# privileged_without_host_devices = false
# allowed_annotations = []
# platform_runtime_paths = { "os/arch" = "/path/to/binary" }
# no_sync_log = false
# default_annotations = {}
# stream_websockets = false
# seccomp_profile = ""
# Where:
# - runtime-handler: Name used to identify the runtime.
# - runtime_path (optional, string): Absolute path to the runtime executable in
#   the host filesystem. If omitted, the runtime-handler identifier should match
#   the runtime executable name, and the runtime executable should be placed
#   in $PATH.
# - runtime_type (optional, string): Type of runtime, one of: "oci", "vm". If
#   omitted, an "oci" runtime is assumed.
# - runtime_root (optional, string): Root directory for storage of containers
#   state.
# - runtime_config_path (optional, string): the path for the runtime configuration
#   file. This can only be used with when using the VM runtime_type.
# - inherit_default_runtime (optional, bool): when true the runtime_path,
#   runtime_type, runtime_root and runtime_config_path will be replaced by
#   the values from the default runtime on load time.
# - privileged_without_host_devices (optional, bool): an option for restricting
#   host devices from being passed to privileged containers.
# - allowed_annotations (optional, array of strings): an option for specifying
#   a list of experimental annotations that this runtime handler is allowed to process.
#   The currently recognized values are:
#   "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod.
#   "io.kubernetes.cri-o.cgroup2-mount-hierarchy-rw" for mounting cgroups writably when set to "true".
#   "io.kubernetes.cri-o.Devices" for configuring devices for the pod.
#   "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm.
#   "io.kubernetes.cri-o.UnifiedCgroup.$CTR_NAME" for configuring the cgroup v2 unified block for a container.
#   "io.containers.trace-syscall" for tracing syscalls via the OCI seccomp BPF hook.
#   "io.kubernetes.cri-o.seccompNotifierAction" for enabling the seccomp notifier feature.
#   "io.kubernetes.cri-o.umask" for setting the umask for container init process.
#   "io.kubernetes.cri.rdt-class" for setting the RDT class of a container
#   "seccomp-profile.kubernetes.cri-o.io" for setting the seccomp profile for:
#     - a specific container by using: "seccomp-profile.kubernetes.cri-o.io/<CONTAINER_NAME>"
#     - a whole pod by using: "seccomp-profile.kubernetes.cri-o.io/POD"
#     Note that the annotation works on containers as well as on images.
#     For images, the plain annotation "seccomp-profile.kubernetes.cri-o.io"
#     can be used without the required "/POD" suffix or a container name.
#   "io.kubernetes.cri-o.DisableFIPS" for disabling FIPS mode in a Kubernetes pod within a FIPS-enabled cluster.
# - monitor_path (optional, string): The path of the monitor binary. Replaces
#   deprecated option "conmon".
# - monitor_cgroup (optional, string): The cgroup the container monitor process will be put in.
#   Replaces deprecated option "conmon_cgroup".
# - monitor_exec_cgroup (optional, string): If set to "container", indicates exec probes
#   should be moved to the container's cgroup
# - monitor_env (optional, array of strings): Environment variables to pass to the monitor.
#   Replaces deprecated option "conmon_env".
#   When using the pod runtime and conmon-rs, then the monitor_env can be used to further configure
#   conmon-rs by using:
#     - LOG_DRIVER=[none,systemd,stdout] - Enable logging to the configured target, defaults to none.
#     - HEAPTRACK_OUTPUT_PATH=/path/to/dir - Enable heaptrack profiling and save the files to the set directory.
#     - HEAPTRACK_BINARY_PATH=/path/to/heaptrack - Enable heaptrack profiling and use set heaptrack binary.
# - platform_runtime_paths (optional, map): A mapping of platforms to the corresponding
#   runtime executable paths for the runtime handler.
# - container_min_memory (optional, string): The minimum memory that must be set for a container.
#   This value can be used to override the currently set global value for a specific runtime. If not set,
#   a global default value of "12 MiB" will be used.
# - no_sync_log (optional, bool): If set to true, the runtime will not sync the log file on rotate or container exit.
#   This option is only valid for the 'oci' runtime type. Setting this option to true can cause data loss, e.g.
#   when a machine crash happens.
# - default_annotations (optional, map): Default annotations if not overridden by the pod spec.
# - stream_websockets (optional, bool): Enable the WebSocket protocol for container exec, attach and port forward.
# - seccomp_profile (optional, string): The absolute path of the seccomp.json profile which is used as the default
#   seccomp profile for the runtime.
#   If not specified or set to "", the runtime seccomp_profile will be used.
#   If that is also not specified or set to "", the internal default seccomp profile will be applied.
#
# Using the seccomp notifier feature:
#
# This feature can help you to debug seccomp related issues, for example if
# blocked syscalls (permission denied errors) have negative impact on the workload.
#
# To be able to use this feature, configure a runtime which has the annotation
# "io.kubernetes.cri-o.seccompNotifierAction" in the allowed_annotations array.
#
# It also requires at least runc 1.1.0 or crun 0.19 which support the notifier
# feature.
#
# If everything is setup, CRI-O will modify chosen seccomp profiles for
# containers if the annotation "io.kubernetes.cri-o.seccompNotifierAction" is
# set on the Pod sandbox. CRI-O will then get notified if a container is using
# a blocked syscall and then terminate the workload after a timeout of 5
# seconds if the value of "io.kubernetes.cri-o.seccompNotifierAction=stop".
#
# This also means that multiple syscalls can be captured during that period,
# while the timeout will get reset once a new syscall has been discovered.
#
# This also means that the Pods "restartPolicy" has to be set to "Never",
# otherwise the kubelet will restart the container immediately.
#
# Please be aware that CRI-O is not able to get notified if a syscall gets
# blocked based on the seccomp defaultAction, which is a general runtime
# limitation.
{{ range $runtime_name, $runtime_handler := .Runtimes }}
{{ $.Comment }}[crio.runtime.runtimes.{{ $runtime_name }}]
{{ $.Comment }}runtime_path = "{{ $runtime_handler.RuntimePath }}"
{{ $.Comment }}runtime_type = "{{ $runtime_handler.RuntimeType }}"
{{ $.Comment }}runtime_root = "{{ $runtime_handler.RuntimeRoot }}"
{{ $.Comment }}inherit_default_runtime = {{ $runtime_handler.InheritDefaultRuntime }}
{{ $.Comment }}runtime_config_path = "{{ $runtime_handler.RuntimeConfigPath }}"
{{ $.Comment }}container_min_memory = "{{ $runtime_handler.ContainerMinMemory }}"
{{ $.Comment }}monitor_path = "{{ $runtime_handler.MonitorPath }}"
{{ $.Comment }}monitor_cgroup = "{{ $runtime_handler.MonitorCgroup }}"
{{ $.Comment }}monitor_exec_cgroup = "{{ $runtime_handler.MonitorExecCgroup }}"
{{ $.Comment }}{{ if $runtime_handler.MonitorEnv }}monitor_env = [
{{ range $opt := $runtime_handler.MonitorEnv }}{{ $.Comment }}{{ printf "\t%q,\n" $opt }}{{ end }}{{ $.Comment }}]{{ end }}
{{ if $runtime_handler.AllowedAnnotations }}{{ $.Comment }}allowed_annotations = [
{{ range $opt := $runtime_handler.AllowedAnnotations }}{{ $.Comment }}{{ printf "\t%q,\n" $opt }}{{ end }}{{ $.Comment }}]{{ end }}
{{ $.Comment }}privileged_without_host_devices = {{ $runtime_handler.PrivilegedWithoutHostDevices }}
{{ if $runtime_handler.PlatformRuntimePaths }}platform_runtime_paths = {
{{- $first := true }}{{- range $key, $value := $runtime_handler.PlatformRuntimePaths }}
{{- if not $first }},{{ end }}{{- printf "%q = %q" $key $value }}{{- $first = false }}{{- end }}}
{{ end }}
{{ if $runtime_handler.DefaultAnnotations }}default_annotations = {
{{- $first := true }}{{- range $key, $value := $runtime_handler.DefaultAnnotations }}
{{- if not $first }},{{ end }}{{- printf "%q = %q" $key $value }}{{- $first = false }}{{- end }}}
{{ end }}
{{ end }}
`
// templateStringCrioRuntimeWorkloads is the config template fragment for the
// [crio.runtime.workloads.*] tables. It iterates over the Workloads map and
// renders the nested .resources table only when Resources is non-nil.
const templateStringCrioRuntimeWorkloads = `# The workloads table defines ways to customize containers with different resources
# that work based on annotations, rather than the CRI.
# Note, the behavior of this table is EXPERIMENTAL and may change at any time.
# Each workload, has a name, activation_annotation, annotation_prefix and set of resources it supports mutating.
# The currently supported resources are "cpuperiod" "cpuquota", "cpushares", "cpulimit" and "cpuset". The values for "cpuperiod" and "cpuquota" are denoted in microseconds.
# The value for "cpulimit" is denoted in millicores, this value is used to calculate the "cpuquota" with the supplied "cpuperiod" or the default "cpuperiod".
# Note that the "cpulimit" field overrides the "cpuquota" value supplied in this configuration.
# Each resource can have a default value specified, or be empty.
# For a container to opt-into this workload, the pod should be configured with the annotation $activation_annotation (key only, value is ignored).
# To customize per-container, an annotation of the form $annotation_prefix.$resource/$ctrName = "value" can be specified
# signifying for that resource type to override the default value.
# If the annotation_prefix is not present, every container in the pod will be given the default values.
# Example:
# [crio.runtime.workloads.workload-type]
# activation_annotation = "io.crio/workload"
# annotation_prefix = "io.crio.workload-type"
# [crio.runtime.workloads.workload-type.resources]
# cpuset = "0-1"
# cpushares = "5"
# cpuquota = "1000"
# cpuperiod = "100000"
# cpulimit = "35"
# Where:
# The workload name is workload-type.
# To specify, the pod must have the "io.crio.workload" annotation (this is a precise string match).
# This workload supports setting cpuset and cpu resources.
# annotation_prefix is used to customize the different resources.
# To configure the cpu shares a container gets in the example above, the pod would have to have the following annotation:
# "io.crio.workload-type/$container_name = {"cpushares": "value"}"
{{ range $workload_type, $workload_config := .Workloads }}
{{ $.Comment }}[crio.runtime.workloads.{{ $workload_type }}]
{{ $.Comment }}activation_annotation = "{{ $workload_config.ActivationAnnotation }}"
{{ $.Comment }}annotation_prefix = "{{ $workload_config.AnnotationPrefix }}"
{{ if $workload_config.Resources }}{{ $.Comment }}[crio.runtime.workloads.{{ $workload_type }}.resources]
{{ $.Comment }}cpuset = "{{ $workload_config.Resources.CPUSet }}"
{{ $.Comment }}cpuquota = {{ $workload_config.Resources.CPUQuota }}
{{ $.Comment }}cpuperiod = {{ $workload_config.Resources.CPUPeriod }}
{{ $.Comment }}cpushares = {{ $workload_config.Resources.CPUShares }}
{{ $.Comment }}cpulimit = {{ $workload_config.Resources.CPULimit }}{{ end }}
{{ end }}
`
// templateStringCrioRuntimeHostNetworkDisableSELinux is the config template
// fragment for the `hostnetwork_disable_selinux` option.
const templateStringCrioRuntimeHostNetworkDisableSELinux = `# hostnetwork_disable_selinux determines whether
# SELinux should be disabled within a pod when it is running in the host network namespace
# Default value is set to true
{{ $.Comment }}hostnetwork_disable_selinux = {{ .HostNetworkDisableSELinux }}
`
// templateStringCrioRuntimeDisableHostPortMapping is the config template
// fragment for the `disable_hostport_mapping` option.
const templateStringCrioRuntimeDisableHostPortMapping = `# disable_hostport_mapping determines whether to enable/disable
# the container hostport mapping in CRI-O.
# Default value is set to 'false'
{{ $.Comment }}disable_hostport_mapping = {{ .DisableHostPortMapping }}
`
// templateStringCrioRuntimeTimezone is the config template fragment for the
// `timezone` option.
const templateStringCrioRuntimeTimezone = `# timezone To set the timezone for a container in CRI-O.
# If an empty string is provided, CRI-O retains its default behavior. Use 'Local' to match the timezone of the host machine.
{{ $.Comment }}timezone = "{{ .Timezone }}"
`
// templateStringCrioImage opens the [crio.image] TOML table.
const templateStringCrioImage = `# The crio.image table contains settings pertaining to the management of OCI images.
#
# CRI-O reads its configured registries defaults from the system wide
# containers-registries.conf(5) located in /etc/containers/registries.conf.
[crio.image]
`
// templateStringCrioImageDefaultTransport is the config template fragment for
// the `default_transport` option.
const templateStringCrioImageDefaultTransport = `# Default transport for pulling images from a remote container storage.
{{ $.Comment }}default_transport = "{{ .DefaultTransport }}"
`
// templateStringCrioImageGlobalAuthFile is the config template fragment for
// the `global_auth_file` option.
const templateStringCrioImageGlobalAuthFile = `# The path to a file containing credentials necessary for pulling images from
# secure registries. The file is similar to that of /var/lib/kubelet/config.json
{{ $.Comment }}global_auth_file = "{{ .GlobalAuthFile }}"
`
// templateStringCrioImagePauseImage is the config template fragment for the
// `pause_image` option.
const templateStringCrioImagePauseImage = `# The image used to instantiate infra containers.
# This option supports live configuration reload.
{{ $.Comment }}pause_image = "{{ .PauseImage }}"
`
// templateStringCrioImagePauseImageAuthFile is the config template fragment
// for the `pause_image_auth_file` option.
const templateStringCrioImagePauseImageAuthFile = `# The path to a file containing credentials specific for pulling the pause_image from
# above. The file is similar to that of /var/lib/kubelet/config.json
# This option supports live configuration reload.
{{ $.Comment }}pause_image_auth_file = "{{ .PauseImageAuthFile }}"
`
// templateStringCrioImagePauseCommand is the config template fragment for the
// `pause_command` option.
const templateStringCrioImagePauseCommand = `# The command to run to have a container stay in the paused state.
# When explicitly set to "", it will fallback to the entrypoint and command
# specified in the pause image. When commented out, it will fallback to the
# default: "/pause". This option supports live configuration reload.
{{ $.Comment }}pause_command = "{{ .PauseCommand }}"
`
// templateStringCrioImagePinnedImages is the config template fragment for the
// `pinned_images` list.
const templateStringCrioImagePinnedImages = `# List of images to be excluded from the kubelet's garbage collection.
# It allows specifying image names using either exact, glob, or keyword
# patterns. Exact matches must match the entire name, glob matches can
# have a wildcard * at the end, and keyword matches can have wildcards
# on both ends. By default, this list includes the "pause" image if
# configured by the user, which is used as a placeholder in Kubernetes pods.
{{ $.Comment }}pinned_images = [
{{ range $opt := .PinnedImages }}{{ $.Comment }}{{ printf "\t%q,\n" $opt }}{{ end }}{{ $.Comment }}]
`
// templateStringCrioImageSignaturePolicy is the config template fragment for
// the `signature_policy` option.
const templateStringCrioImageSignaturePolicy = `# Path to the file which decides what sort of policy we use when deciding
# whether or not to trust an image that we've pulled. It is not recommended that
# this option be used, as the default behavior of using the system-wide default
# policy (i.e., /etc/containers/policy.json) is most often preferred. Please
# refer to containers-policy.json(5) for more details.
{{ $.Comment }}signature_policy = "{{ .SignaturePolicyPath }}"
`
// templateStringCrioImageSignaturePolicyDir is the config template fragment
// for the `signature_policy_dir` option.
const templateStringCrioImageSignaturePolicyDir = `# Root path for pod namespace-separated signature policies.
# The final policy to be used on image pull will be <SIGNATURE_POLICY_DIR>/<NAMESPACE>.json.
# If no pod namespace is being provided on image pull (via the sandbox config),
# or the concatenated path is non existent, then the signature_policy or system
# wide policy will be used as fallback. Must be an absolute path.
{{ $.Comment }}signature_policy_dir = "{{ .SignaturePolicyDir }}"
`
// templateStringCrioImageInsecureRegistries is the config template fragment
// for the deprecated `insecure_registries` list.
const templateStringCrioImageInsecureRegistries = `# List of registries to skip TLS verification for pulling images. Please
# consider configuring the registries via /etc/containers/registries.conf before
# changing them here.
# This option is deprecated and no longer effective. Use registries.conf file instead.
{{ $.Comment }}insecure_registries = [
{{ range $opt := .InsecureRegistries }}{{ $.Comment }}{{ printf "\t%q,\n" $opt }}{{ end }}{{ $.Comment }}]
`
// templateStringCrioImageImageVolumes is the config template fragment for the
// `image_volumes` option.
const templateStringCrioImageImageVolumes = `# Controls how image volumes are handled. The valid values are mkdir, bind and
# ignore; the latter will ignore volumes entirely.
{{ $.Comment }}image_volumes = "{{ .ImageVolumes }}"
`
// templateStringCrioImageBigFilesTemporaryDir is the config template fragment
// for the `big_files_temporary_dir` option.
const templateStringCrioImageBigFilesTemporaryDir = `# Temporary directory to use for storing big files
{{ $.Comment }}big_files_temporary_dir = "{{ .BigFilesTemporaryDir }}"
`
// templateStringCrioImageAutoReloadRegistries is the config template fragment
// for the `auto_reload_registries` option.
const templateStringCrioImageAutoReloadRegistries = `# If true, CRI-O will automatically reload the mirror registry when
# there is an update to the 'registries.conf.d' directory. Default value is set to 'false'.
{{ $.Comment }}auto_reload_registries = {{ .AutoReloadRegistries }}
`
const templateStringCrioImagePullProgressTimeout = `# The timeout for an image pull to make progress until the pull operation
# gets canceled. This value will be also used for calculating the pull progress interval to pull_progress_timeout / 10.
# Can be set to 0 to disable the timeout as well as the progress output.
{{ $.Comment }}pull_progress_timeout = "{{ .PullProgressTimeout }}"
`
const templateStringCrioImageShortNameMode = `# The mode of short name resolution.
# The valid values are "enforcing" and "disabled", and the default is "enforcing".
# If "enforcing", an image pull will fail if a short name is used, but the results are ambiguous.
# If "disabled", the first result will be chosen.
{{ $.Comment }}short_name_mode = "{{ .ShortNameMode }}"
`
// templateStringCrioNetwork introduces the [crio.network] TOML table.
// Typo fix: "table containers settings" -> "table contains settings".
const templateStringCrioNetwork = `# The crio.network table contains settings pertaining to the management of
# CNI plugins.
[crio.network]
`
// TOML template snippets for the options of the [crio.network] table.
// Note: cni_default_network is intentionally emitted commented out.
const templateStringCrioNetworkCniDefaultNetwork = `# The default CNI network name to be selected. If not set or "", then
# CRI-O will pick-up the first one found in network_dir.
# cni_default_network = "{{ .CNIDefaultNetwork }}"
`
const templateStringCrioNetworkNetworkDir = `# Path to the directory where CNI configuration files are located.
{{ $.Comment }}network_dir = "{{ .NetworkDir }}"
`
const templateStringCrioNetworkPluginDirs = `# Paths to directories where CNI plugin binaries are located.
{{ $.Comment }}plugin_dirs = [
{{ range $opt := .PluginDirs }}{{ $.Comment }}{{ printf "\t%q,\n" $opt }}{{ end }}{{ $.Comment }}]
`
// TOML template snippets for the [crio.metrics] table and its options.
const templateStringCrioMetrics = `# A necessary configuration for Prometheus based metrics retrieval
[crio.metrics]
`
const templateStringCrioMetricsEnableMetrics = `# Globally enable or disable metrics support.
{{ $.Comment }}enable_metrics = {{ .EnableMetrics }}
`
const templateStringCrioMetricsCollectors = `# Specify enabled metrics collectors.
# Per default all metrics are enabled.
# It is possible, to prefix the metrics with "container_runtime_" and "crio_".
# For example, the metrics collector "operations" would be treated in the same
# way as "crio_operations" and "container_runtime_crio_operations".
{{ $.Comment }}metrics_collectors = [
{{ range $opt := .MetricsCollectors }}{{ $.Comment }}{{ printf "\t%q,\n" $opt }}{{ end }}{{ $.Comment }}]
`
const templateStringCrioMetricsMetricsHost = `# The IP address or hostname on which the metrics server will listen.
{{ $.Comment }}metrics_host = "{{ .MetricsHost }}"
`
const templateStringCrioMetricsMetricsPort = `# The port on which the metrics server will listen.
{{ $.Comment }}metrics_port = {{ .MetricsPort }}
`
const templateStringCrioMetricsMetricsSocket = `# Local socket path to bind the metrics server to
{{ $.Comment }}metrics_socket = "{{ .MetricsSocket }}"
`
const templateStringCrioMetricsMetricsCert = `# The certificate for the secure metrics server.
# If the certificate is not available on disk, then CRI-O will generate a
# self-signed one. CRI-O also watches for changes of this path and reloads the
# certificate on any modification event.
{{ $.Comment }}metrics_cert = "{{ .MetricsCert }}"
`
const templateStringCrioMetricsMetricsKey = `# The certificate key for the secure metrics server.
# Behaves in the same way as the metrics_cert.
{{ $.Comment }}metrics_key = "{{ .MetricsKey }}"
`
// TOML template snippets for the [crio.tracing] table and its options.
const templateStringCrioTracing = `# A necessary configuration for OpenTelemetry trace data exporting
[crio.tracing]
`
const templateStringCrioTracingEnableTracing = `# Globally enable or disable exporting OpenTelemetry traces.
{{ $.Comment }}enable_tracing = {{ .EnableTracing }}
`
const templateStringCrioTracingTracingEndpoint = `# Address on which the gRPC trace collector listens on.
{{ $.Comment }}tracing_endpoint = "{{ .TracingEndpoint }}"
`
const templateStringCrioTracingTracingSamplingRatePerMillion = `# Number of samples to collect per million spans. Set to 1000000 to always sample.
{{ $.Comment }}tracing_sampling_rate_per_million = {{ .TracingSamplingRatePerMillion }}
`
// TOML template snippets for the [crio.stats] table and its options.
const templateStringCrioStats = `# Necessary information pertaining to container and pod stats reporting.
[crio.stats]
`
const templateStringCrioStatsStatsCollectionPeriod = `# The number of seconds between collecting pod and container stats.
# If set to 0, the stats are collected on-demand instead.
{{ $.Comment }}stats_collection_period = {{ .StatsCollectionPeriod }}
`
const templateStringCrioStatsCollectionPeriod = `# The number of seconds between collecting pod/container stats and pod
# sandbox metrics. If set to 0, the metrics/stats are collected on-demand instead.
{{ $.Comment }}collection_period = {{ .CollectionPeriod }}
`
const templateStringCrioStatsIncludedPodMetrics = `# List of included pod metrics.
{{ $.Comment }}included_pod_metrics = [
{{ range $opt := .IncludedPodMetrics }}{{ $.Comment }}{{ printf "\t%q,\n" $opt }}{{ end }}{{ $.Comment }}]
`
// TOML template snippets for the [crio.nri] table and its options.
const templateStringCrioNRI = `# CRI-O NRI configuration.
[crio.nri]
`
const templateStringCrioNRIEnable = `# Globally enable or disable NRI.
{{ $.Comment }}enable_nri = {{ .NRI.Enabled }}
`
const templateStringCrioNRISocketPath = `# NRI socket to listen on.
{{ $.Comment }}nri_listen = "{{ .NRI.SocketPath }}"
`
const templateStringCrioNRIPluginDir = `# NRI plugin directory to use.
{{ $.Comment }}nri_plugin_dir = "{{ .NRI.PluginPath }}"
`
const templateStringCrioNRIPluginConfigDir = `# NRI plugin configuration directory to use.
{{ $.Comment }}nri_plugin_config_dir = "{{ .NRI.PluginConfigPath }}"
`
const templateStringCrioNRIDisableConnections = `# Disable connections from externally launched NRI plugins.
{{ $.Comment }}nri_disable_connections = {{ .NRI.DisableConnections }}
`
const templateStringCrioNRIPluginRegistrationTimeout = `# Timeout for a plugin to register itself with NRI.
{{ $.Comment }}nri_plugin_registration_timeout = "{{ .NRI.PluginRegistrationTimeout }}"
`
const templateStringCrioNRIPluginRequestTimeout = `# Timeout for a plugin to handle an NRI request.
{{ $.Comment }}nri_plugin_request_timeout = "{{ .NRI.PluginRequestTimeout }}"
`
// templateStringCrioNRIDefaultValidator is the template snippet for the
// [crio.nri.default_validator] table.
// Typo fix in the emitted comment text: "unconfied" -> "unconfined".
const templateStringCrioNRIDefaultValidator = `# NRI default validator configuration.
# If enabled, the builtin default validator can be used to reject a container if some
# NRI plugin requested a restricted adjustment. Currently the following adjustments
# can be restricted/rejected:
# - OCI hook injection
# - adjustment of runtime default seccomp profile
# - adjustment of unconfined seccomp profile
# - adjustment of a custom seccomp profile
# - adjustment of linux namespaces
# Additionally, the default validator can be used to reject container creation if any
# of a required set of plugins has not processed a container creation request, unless
# the container has been annotated to tolerate a missing plugin.
#
{{ $.Comment }}[crio.nri.default_validator]
{{ $.Comment }}nri_enable_default_validator = {{ .NRI.DefaultValidator.Enable }}
{{ $.Comment }}nri_validator_reject_oci_hook_adjustment = {{ .NRI.DefaultValidator.RejectOCIHookAdjustment }}
{{ $.Comment }}nri_validator_reject_runtime_default_seccomp_adjustment = {{ .NRI.DefaultValidator.RejectRuntimeDefaultSeccompAdjustment }}
{{ $.Comment }}nri_validator_reject_unconfined_seccomp_adjustment = {{ .NRI.DefaultValidator.RejectUnconfinedSeccompAdjustment }}
{{ $.Comment }}nri_validator_reject_custom_seccomp_adjustment = {{ .NRI.DefaultValidator.RejectCustomSeccompAdjustment }}
{{ $.Comment }}nri_validator_reject_namespace_adjustment = {{ .NRI.DefaultValidator.RejectNamespaceAdjustment }}
{{ $.Comment }}nri_validator_required_plugins = [
{{ range $p := .NRI.DefaultValidator.RequiredPlugins }}{{ $.Comment }}{{ printf "\t%q,\n" $p }}{{ end }}{{ $.Comment }}]
{{ $.Comment }}nri_validator_tolerate_missing_plugins_annotation = "{{ .NRI.DefaultValidator.TolerateMissingAnnotation }}"
`
package config
import (
"encoding/json"
"fmt"
"strings"
"github.com/opencontainers/runtime-tools/generate"
"github.com/sirupsen/logrus"
"k8s.io/utils/cpuset"
)
const (
	// milliCPUToCPU is the number of millicores in one CPU core.
	milliCPUToCPU = 1000
	// 100000 microseconds is equivalent to 100ms.
	defaultQuotaPeriod = 100000
	// 1000 microseconds is equivalent to 1ms
	// defined here:
	// https://github.com/torvalds/linux/blob/cac03ac368fabff0122853de2422d4e17a32de08/kernel/sched/core.c#L10546
	minQuotaPeriod = 1000
)
// Workloads maps a workload name to its configuration.
type Workloads map[string]*WorkloadConfig
// WorkloadConfig holds the settings for a single workload type. A workload is
// activated for a pod via its ActivationAnnotation and may override selected
// container resources through annotations carrying AnnotationPrefix.
type WorkloadConfig struct {
	// ActivationAnnotation is the pod annotation that activates these workload settings
	ActivationAnnotation string `toml:"activation_annotation"`
	// AnnotationPrefix is the way a pod can override a specific resource for a container.
	// The full annotation must be of the form $annotation_prefix.$resource/$ctrname = $value
	AnnotationPrefix string `toml:"annotation_prefix"`
	// AllowedAnnotations is a slice of experimental annotations that this workload is allowed to process.
	// The currently recognized values are:
	// "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod.
	// "io.kubernetes.cri-o.Devices" for configuring devices for the pod.
	// "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm.
	// "io.kubernetes.cri-o.UnifiedCgroup.$CTR_NAME" for configuring the cgroup v2 unified block for a container.
	// "io.containers.trace-syscall" for tracing syscalls via the OCI seccomp BPF hook.
	// "seccomp-profile.kubernetes.cri-o.io" for setting the seccomp profile for a specific container, pod or whole image.
	AllowedAnnotations []string `toml:"allowed_annotations,omitempty"`
	// DisallowedAnnotations is the slice of experimental annotations that are not allowed for this workload.
	// It is derived from AllowedAnnotations by ValidateWorkloadAllowedAnnotations, not read from TOML.
	DisallowedAnnotations []string
	// Resources are the names of the resources that can be overridden by annotation.
	// The key of the map is the resource name. The following resources are supported:
	// `cpushares`: configure cpu shares for a given container
	// `cpuquota`: configure cpu quota for a given container
	// `cpuperiod`: configure cpu period for a given container
	// `cpuset`: configure cpuset for a given container
	// `cpulimit`: configure cpu quota in millicores for a given container, overrides the `cpuquota` field
	// The value of the map is the default value for that resource.
	// If a container is configured to use this workload, and does not specify
	// the annotation with the resource and value, the default value will apply.
	// Default values do not need to be specified.
	Resources *Resources `toml:"resources"`
}
// Resources is a structure for overriding certain resources for the pod.
// This resources structure provides a default value, and can be overridden
// by using the AnnotationPrefix.
type Resources struct {
	// Specifies the number of CPU shares this Pod has access to.
	CPUShares uint64 `json:"cpushares,omitempty"`
	// Specifies the CPU quota this Pod is limited to in microseconds.
	CPUQuota int64 `json:"cpuquota,omitempty"`
	// Specifies the CPU period this Pod will use in microseconds.
	CPUPeriod uint64 `json:"cpuperiod,omitempty"`
	// Specifies the cpuset this Pod has access to.
	CPUSet string `json:"cpuset,omitempty"`
	// Specifies the CPU limit in millicores. This will be used to calculate the CPU quota.
	// When non-zero it takes precedence over CPUQuota (see resourcesFromAnnotation).
	CPULimit int64 `json:"cpulimit,omitempty"`
}
// Validate checks every configured workload and returns the first
// validation error encountered, or nil if all workloads are valid.
func (w Workloads) Validate() error {
	for name, workload := range w {
		if err := workload.Validate(name); err != nil {
			return err
		}
	}
	return nil
}
// Validate ensures the workload carries an activation annotation, that its
// allowed annotations are valid, and that its resource defaults are sane.
func (w *WorkloadConfig) Validate(workloadName string) error {
	// Without an activation annotation the workload could never be
	// selected for any pod, so treat that as a configuration error.
	if w.ActivationAnnotation == "" {
		return fmt.Errorf("annotation shouldn't be empty for workload %q", workloadName)
	}
	err := w.ValidateWorkloadAllowedAnnotations()
	if err != nil {
		return err
	}
	// ValidateDefaults is nil-safe, so a missing Resources section is fine.
	return w.Resources.ValidateDefaults()
}
// ValidateWorkloadAllowedAnnotations validates the configured allowed
// annotations and caches the derived disallowed set on the workload.
func (w *WorkloadConfig) ValidateWorkloadAllowedAnnotations() error {
	disallowedAnnotations, err := validateAllowedAndGenerateDisallowedAnnotations(w.AllowedAnnotations)
	if err != nil {
		return err
	}
	logrus.Debugf("Allowed annotations for workload: %v", w.AllowedAnnotations)
	w.DisallowedAnnotations = disallowedAnnotations
	return nil
}
// AllowedAnnotations returns the allowed annotations of the workload
// activated by the provided annotation set, or an empty slice if no
// workload matches.
func (w Workloads) AllowedAnnotations(toFind map[string]string) []string {
	if workload := w.workloadGivenActivationAnnotation(toFind); workload != nil {
		return workload.AllowedAnnotations
	}
	return []string{}
}
// FilterDisallowedAnnotations filters annotations that are not specified in the allowed_annotations map
// for a given handler.
// It returns an error if the provided allowed annotations fail validation.
// The annotations map is mutated in-place.
func (w Workloads) FilterDisallowedAnnotations(allowed []string, toFilter map[string]string) error {
	disallowed, err := validateAllowedAndGenerateDisallowedAnnotations(allowed)
	if err != nil {
		return err
	}
	logrus.Infof("Allowed annotations are specified for workload %v", allowed)
	// Deleting map entries while ranging over the same map is safe in Go.
	for ann := range toFilter {
		for _, d := range disallowed {
			if strings.HasPrefix(ann, d) {
				delete(toFilter, ann)
			}
		}
	}
	return nil
}
// MutateSpecGivenAnnotations applies the resource overrides of the workload
// activated by the sandbox annotations (if any) to the generated spec.
func (w Workloads) MutateSpecGivenAnnotations(ctrName string, specgen *generate.Generator, sboxAnnotations map[string]string) error {
	workload := w.workloadGivenActivationAnnotation(sboxAnnotations)
	if workload == nil {
		// No workload is activated by these annotations; nothing to do.
		return nil
	}
	overrides, err := resourcesFromAnnotation(workload.AnnotationPrefix, ctrName, sboxAnnotations, workload.Resources)
	if err != nil {
		return err
	}
	// MutateSpec is nil-safe, so a nil override set is fine here.
	overrides.MutateSpec(specgen)
	return nil
}
// workloadGivenActivationAnnotation returns the workload whose activation
// annotation is present in the provided sandbox annotations, or nil if none
// matches. The annotation map is probed directly instead of being iterated,
// avoiding a linear scan over all annotations per workload.
func (w Workloads) workloadGivenActivationAnnotation(sboxAnnotations map[string]string) *WorkloadConfig {
	for _, wc := range w {
		if _, ok := sboxAnnotations[wc.ActivationAnnotation]; ok {
			return wc
		}
	}
	return nil
}
// resourcesFromAnnotation resolves the effective resource overrides for the
// given container. It looks up the "<prefix>/<ctrName>" annotation; when it
// is absent, the workload defaults are returned unchanged. When present, its
// value is JSON-decoded and every zero-valued field is backfilled from the
// defaults. Finally, a non-zero CPULimit is converted to a CFS quota via
// milliCPUToQuota, overriding CPUQuota.
// NOTE(review): a JSON literal "null" decodes to a nil *Resources and makes
// this return (nil, nil); callers rely on MutateSpec being nil-safe. Also,
// defaultResources is dereferenced whenever the annotation is present —
// presumably callers guarantee it is non-nil then; verify against callers.
func resourcesFromAnnotation(prefix, ctrName string, allAnnotations map[string]string, defaultResources *Resources) (*Resources, error) {
	annotationKey := prefix + "/" + ctrName
	value, ok := allAnnotations[annotationKey]
	if !ok {
		return defaultResources, nil
	}
	var resources *Resources
	if err := json.Unmarshal([]byte(value), &resources); err != nil {
		return nil, err
	}
	if resources == nil {
		return nil, nil
	}
	// Backfill unset (zero-valued) fields from the workload defaults.
	if resources.CPUSet == "" {
		resources.CPUSet = defaultResources.CPUSet
	}
	if resources.CPUShares == 0 {
		resources.CPUShares = defaultResources.CPUShares
	}
	if resources.CPUQuota == 0 {
		resources.CPUQuota = defaultResources.CPUQuota
	}
	if resources.CPUPeriod == 0 {
		resources.CPUPeriod = defaultResources.CPUPeriod
	}
	if resources.CPULimit == 0 {
		resources.CPULimit = defaultResources.CPULimit
	}
	// If a CPU Limit in Milli is supplied via the annotation, calculate quota with the given CPU period.
	if resources.CPULimit != 0 {
		resources.CPUQuota = milliCPUToQuota(resources.CPULimit, int64(resources.CPUPeriod))
	}
	return resources, nil
}
// milliCPUToQuota converts milliCPU to CFS quota and period values.
// Input parameters and resulting value is number of microseconds.
func milliCPUToQuota(milliCPU, period int64) int64 {
	// No CPU limit requested: report a zero quota.
	if milliCPU == 0 {
		return 0
	}
	// Fall back to the default CFS period when none was provided.
	if period == 0 {
		period = defaultQuotaPeriod
	}
	// Normalize the millicore value over the period, clamping to the
	// kernel's 1ms minimum quota.
	return max((milliCPU*period)/milliCPUToCPU, minQuotaPeriod)
}
// ValidateDefaults checks that the configured default resource values are
// usable. A nil receiver is valid and means no defaults were configured.
func (r *Resources) ValidateDefaults() error {
	if r == nil {
		return nil
	}
	// The cpuset string must be parseable; an empty string parses fine.
	if _, err := cpuset.Parse(r.CPUSet); err != nil {
		return fmt.Errorf("unable to parse cpuset %q: %w", r.CPUSet, err)
	}
	switch {
	case r.CPUQuota != 0 && r.CPUQuota < int64(r.CPUShares):
		return fmt.Errorf("cpuquota %d cannot be less than cpushares %d", r.CPUQuota, r.CPUShares)
	case r.CPUPeriod != 0 && r.CPUPeriod < minQuotaPeriod:
		return fmt.Errorf("cpuperiod %d cannot be less than 1000 microseconds", r.CPUPeriod)
	}
	return nil
}
// MutateSpec applies the non-zero resource values to the given spec
// generator. A nil receiver is a no-op, so callers may pass the result of
// resourcesFromAnnotation through unconditionally.
func (r *Resources) MutateSpec(specgen *generate.Generator) {
	if r == nil {
		return
	}
	// Only explicitly-set (non-zero) values are applied; everything else
	// keeps whatever the spec already contains.
	if r.CPUSet != "" {
		specgen.SetLinuxResourcesCPUCpus(r.CPUSet)
	}
	if r.CPUShares != 0 {
		specgen.SetLinuxResourcesCPUShares(r.CPUShares)
	}
	if r.CPUQuota != 0 {
		specgen.SetLinuxResourcesCPUQuota(r.CPUQuota)
	}
	if r.CPUPeriod != 0 {
		specgen.SetLinuxResourcesCPUPeriod(r.CPUPeriod)
	}
}
package collectors
import "strings"
// Collector specifies a single metrics collector identifier. Identifiers may
// carry a "crio_" or "container_runtime_" prefix, which Stripped removes.
type Collector string
// Collectors specifies a list of metrics collectors.
type Collectors []Collector
const (
	// crioPrefix is the CRI-O specific metric name prefix.
	crioPrefix = "crio_"
	// Subsystem is the namespace where the metrics are being registered.
	Subsystem = "container_runtime"
	// subsystemPrefix is the subsystem followed by an underscore, as it
	// appears in fully-qualified metric names.
	subsystemPrefix = Subsystem + "_"
	// ImagePullsLayerSize is the key for CRI-O image pull metrics per layer.
	ImagePullsLayerSize Collector = crioPrefix + "image_pulls_layer_size"
	// ContainersEventsDropped is the key for the total number of container events dropped counter.
	ContainersEventsDropped Collector = crioPrefix + "containers_events_dropped_total"
	// ContainersOOMTotal is the key for the total CRI-O container out of memory metrics.
	ContainersOOMTotal Collector = crioPrefix + "containers_oom_total"
	// ProcessesDefunct is the key for the total number of defunct processes in a node.
	ProcessesDefunct Collector = crioPrefix + "processes_defunct"
	// OperationsTotal is the key for CRI-O operation metrics.
	OperationsTotal Collector = crioPrefix + "operations_total"
	// OperationsLatencySeconds is the key for the operation latency metrics for each CRI call.
	OperationsLatencySeconds Collector = crioPrefix + "operations_latency_seconds"
	// OperationsLatencySecondsTotal is the key for the operation latency metrics.
	OperationsLatencySecondsTotal Collector = crioPrefix + "operations_latency_seconds_total"
	// OperationsErrorsTotal is the key for the operation error metrics.
	OperationsErrorsTotal Collector = crioPrefix + "operations_errors_total"
	// ImagePullsBytesTotal is the key for CRI-O image pull metrics.
	ImagePullsBytesTotal Collector = crioPrefix + "image_pulls_bytes_total"
	// ImagePullsSkippedBytesTotal is the key for CRI-O skipped image pull metrics.
	ImagePullsSkippedBytesTotal Collector = crioPrefix + "image_pulls_skipped_bytes_total"
	// ImagePullsFailureTotal is the key for failed image downloads in CRI-O.
	ImagePullsFailureTotal Collector = crioPrefix + "image_pulls_failure_total"
	// ImagePullsSuccessTotal is the key for successful image downloads in CRI-O.
	ImagePullsSuccessTotal Collector = crioPrefix + "image_pulls_success_total"
	// ImageLayerReuseTotal is the key for the CRI-O image layer reuse metrics.
	ImageLayerReuseTotal Collector = crioPrefix + "image_layer_reuse_total"
	// ContainersOOMCountTotal is the key for the CRI-O container out of memory metrics per container name.
	ContainersOOMCountTotal Collector = crioPrefix + "containers_oom_count_total"
	// ContainersSeccompNotifierCountTotal is the key for the CRI-O container seccomp notifier metrics per container name and syscalls.
	ContainersSeccompNotifierCountTotal Collector = crioPrefix + "containers_seccomp_notifier_count_total"
	// ResourcesStalledAtStage is the key for the resources stalled at different stages in container and pod creation.
	ResourcesStalledAtStage Collector = crioPrefix + "resources_stalled_at_stage"
	// ContainersStoppedMonitorCount is the key for the containers whose monitor is stopped per container name.
	ContainersStoppedMonitorCount Collector = crioPrefix + "containers_stopped_monitor_count"
)
// FromSlice converts a string slice to a Collectors type, stripping any
// known metric name prefix from each entry.
func FromSlice(in []string) Collectors {
	if len(in) == 0 {
		// Preserve nil for empty input so callers observe the same
		// value the previous implementation produced.
		return nil
	}
	// Pre-size the result to avoid repeated slice growth.
	c := make(Collectors, 0, len(in))
	for _, i := range in {
		c = append(c, Collector(i).Stripped())
	}
	return c
}
// ToSlice converts a Collectors type to a string slice, stripping any known
// metric name prefix from each entry.
func (c Collectors) ToSlice() []string {
	if len(c) == 0 {
		// Preserve nil for empty input, matching the previous behavior.
		return nil
	}
	// Pre-size the result to avoid repeated slice growth.
	r := make([]string, 0, len(c))
	for _, i := range c {
		r = append(r, i.Stripped().String())
	}
	return r
}
// All returns all available metrics collectors referenced by their
// name key.
func All() Collectors {
	everything := []Collector{
		ImagePullsLayerSize,
		ContainersEventsDropped,
		ContainersOOMTotal,
		ProcessesDefunct,
		OperationsTotal,
		OperationsLatencySeconds,
		OperationsLatencySecondsTotal,
		OperationsErrorsTotal,
		ImagePullsBytesTotal,
		ImagePullsSkippedBytesTotal,
		ImagePullsFailureTotal,
		ImagePullsSuccessTotal,
		ImageLayerReuseTotal,
		ContainersOOMCountTotal,
		ContainersSeccompNotifierCountTotal,
		ResourcesStalledAtStage,
		ContainersStoppedMonitorCount,
	}
	// Return the prefix-stripped form of every collector, in order.
	result := make(Collectors, 0, len(everything))
	for _, collector := range everything {
		result = append(result, collector.Stripped())
	}
	return result
}
// Contains returns true if the provided Collector `in` is part of the
// collectors instance. Comparison is done on the prefix-stripped names.
func (c Collectors) Contains(in Collector) bool {
	needle := in.Stripped()
	for _, candidate := range c {
		if candidate.Stripped() == needle {
			return true
		}
	}
	return false
}
// stripPrefix strips the metrics prefixes from the provided string.
func stripPrefix(s string) string {
	// The subsystem prefix must be removed before the bare "crio_"
	// prefix, since fully-qualified names embed both.
	return strings.TrimPrefix(strings.TrimPrefix(s, subsystemPrefix), crioPrefix)
}
// Stripped returns a prefix stripped name for the collector.
func (c Collector) Stripped() Collector {
	return Collector(stripPrefix(string(c)))
}
// String returns the collector identifier as a plain string.
func (c Collector) String() string {
	return string(c)
}
package useragent
import (
"fmt"
"regexp"
"runtime"
"github.com/cri-o/cri-o/internal/version"
)
// Simplest semver "X.Y.Z" format. Used by Get to strip build metadata and
// pre-release suffixes from the reported CRI-O version.
var versionRegex = regexp.MustCompile(`(\d+\.\d+\.\d+)`)
// Get is the User-Agent the CRI-O daemon uses to identify itself.
func Get() (string, error) {
	info, err := version.Get(false)
	if err != nil {
		return "", fmt.Errorf("get version: %w", err)
	}
	// Reduce the CRI-O version to a bare "X.Y.Z" semver, dropping anything
	// that might have been appended as part of the build process.
	crioVersion := info.Version
	if match := versionRegex.FindString(crioVersion); match != "" {
		crioVersion = match
	}
	userAgent := AppendVersions("",
		VersionInfo{Name: "cri-o", Version: crioVersion},
		VersionInfo{Name: "go", Version: info.GoVersion},
		VersionInfo{Name: "os", Version: runtime.GOOS},
		VersionInfo{Name: "arch", Version: runtime.GOARCH},
	)
	return userAgent, nil
}
// Package useragent provides helper functions to pack
// version information into a single User-Agent header.
package useragent
import (
"strings"
)
// VersionInfo is used to model UserAgent versions.
type VersionInfo struct {
	// Name is the product name, e.g. "cri-o".
	Name string
	// Version is the product version string.
	Version string
}
// isValid reports whether both name and version are free of whitespace and
// slashes, which would corrupt the "name/version" User-Agent format.
func (vi *VersionInfo) isValid() bool {
	const stopChars = " \t\r\n/"
	return !strings.ContainsAny(vi.Name, stopChars) &&
		!strings.ContainsAny(vi.Version, stopChars)
}
// AppendVersions converts versions to a string and appends the string to the string base.
//
// Each VersionInfo is rendered as "name/version"; entries containing
// whitespace or slashes are silently skipped. All rendered pieces are joined
// with single spaces, preceded by base when base is non-empty.
//
// Example:
// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"})
// results in "base foo/1.0 bar/2.0".
func AppendVersions(base string, versions ...VersionInfo) string {
	if len(versions) == 0 {
		return base
	}
	parts := make([]string, 0, 1+len(versions))
	if base != "" {
		parts = append(parts, base)
	}
	for _, vi := range versions {
		if !vi.isValid() {
			continue
		}
		parts = append(parts, vi.Name+"/"+vi.Version)
	}
	return strings.Join(parts, " ")
}
package framework
import (
"errors"
"os"
"path/filepath"
"testing"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
)
// TestFramework is used to support commonly used test features.
type TestFramework struct {
	// setup runs before each suite; teardown runs after it.
	setup    func(*TestFramework) error
	teardown func(*TestFramework) error
	// TestError is a generic pre-built error instance tests can reuse.
	TestError error
	// tempDirs and tempFiles track paths created via MustTempDir and
	// MustTempFile so Teardown can remove them.
	tempDirs  []string
	tempFiles []string
}
// NewTestFramework creates a new test framework instance for a given `setup`
// and `teardown` function.
func NewTestFramework(setup, teardown func(*TestFramework) error) *TestFramework {
	// The temp path slices start out as their zero value (nil) and are
	// populated lazily by MustTempDir/MustTempFile.
	return &TestFramework{
		setup:     setup,
		teardown:  teardown,
		TestError: errors.New("error"),
	}
}
// NilFunc is a convenience setup/teardown function which simply does nothing
// and always succeeds.
func NilFunc(f *TestFramework) error {
	return nil
}
// Setup is the global initialization function which runs before each test
// suite.
func (t *TestFramework) Setup() {
	// Global initialization for the whole framework goes in here
	// Setup the actual test suite; a failing setup fails the suite via gomega.
	gomega.Expect(t.setup(t)).To(gomega.Succeed())
}
// Teardown is the global deinitialization function which runs after each test
// suite. It also removes every temporary path registered by MustTempDir and
// MustTempFile.
func (t *TestFramework) Teardown() {
	// Global deinitialization for the whole framework goes in here
	// Teardown the actual test suite
	gomega.Expect(t.teardown(t)).To(gomega.Succeed())
	// Best-effort removal of all temporary paths created by the suite;
	// errors from RemoveAll are deliberately ignored here.
	for _, paths := range [][]string{t.tempDirs, t.tempFiles} {
		for _, p := range paths {
			os.RemoveAll(p)
		}
	}
}
// Describe is a convenience wrapper around the `ginkgo.Describe` function.
// It prefixes the description with "cri-o: " for consistent suite naming.
func (t *TestFramework) Describe(text string, body func()) bool {
	return ginkgo.Describe("cri-o: "+text, body)
}
// MustTempDir uses os.MkdirTemp to create a temporary directory
// with the given prefix. It panics on any error. The directory is
// registered for removal during Teardown.
func (t *TestFramework) MustTempDir(prefix string) string {
	dir, err := os.MkdirTemp("", prefix)
	if err != nil {
		panic(err)
	}
	t.tempDirs = append(t.tempDirs, dir)
	return dir
}
// MustTempFile uses os.CreateTemp to create a temporary file
// with the given pattern and returns its name. It panics on any error.
// The file is registered for removal during Teardown.
func (t *TestFramework) MustTempFile(pattern string) string {
	f, err := os.CreateTemp("", pattern)
	if err != nil {
		panic(err)
	}
	name := f.Name()
	// Close the handle right away: only the path is needed, and leaving
	// the descriptor open would leak it for the lifetime of the suite.
	if err := f.Close(); err != nil {
		panic(err)
	}
	t.tempFiles = append(t.tempFiles, name)
	return name
}
// EnsureRuntimeDeps creates a directory which contains faked runtime
// dependencies for the tests and points PATH at it.
func (t *TestFramework) EnsureRuntimeDeps() string {
	// NOTE(review): "testunig" looks like a typo for "testing", but the
	// prefix is only cosmetic, so it is preserved byte-for-byte here.
	dir := t.MustTempDir("crio-testunig-default-runtime-")
	fakeBinaries := map[string]string{
		"crun":    "",
		"conmon":  "#!/bin/sh\necho 'conmon version 2.1.12'",
		"nsenter": "",
	}
	for binary, contents := range fakeBinaries {
		target := filepath.Join(dir, binary)
		gomega.Expect(os.WriteFile(target, []byte(contents), 0o755)).
			NotTo(gomega.HaveOccurred())
	}
	ginkgo.GinkgoTB().Setenv("PATH", dir)
	return dir
}
// RunFrameworkSpecs is a convenience wrapper for running tests.
// The boolean result of ginkgo.RunSpecs is discarded; failures are still
// reported through the provided testing.T instance.
func RunFrameworkSpecs(t *testing.T, suiteName string) {
	ginkgo.RunSpecs(t, suiteName)
}
package containerstoragemock
import (
"fmt"
istorage "github.com/containers/image/v5/storage"
"go.uber.org/mock/gomock"
"runtime"
"strings"
)
var (
	// mockCtrl is the package-wide gomock controller used by the fuzz
	// targets; it is (re)initialized in init and FuzzParseStoreReference.
	mockCtrl *gomock.Controller
)
// init creates the package-level gomock controller backed by a FuzzTester,
// so gomock failure reporting goes through panics (see FuzzTester.Fatal)
// rather than a real *testing.T.
func init() {
	t := &FuzzTester{}
	mockCtrl = gomock.NewController(t)
}
// FuzzParseStoreReference fuzzes istorage.Transport.ParseStoreReference with
// arbitrary input backed by a mocked storage store. It returns 1 when the
// input was exercised and 0 when it was rejected as uninteresting.
func FuzzParseStoreReference(data []byte) int {
	defer catchPanics()
	// Skip trivially short inputs that contain reference separators.
	// len(data) counts bytes just like len(string(data)) did, without the
	// needless conversion.
	if len(data) <= 3 && strings.ContainsAny(string(data), "@:") {
		return 0
	}
	t := &FuzzTester{}
	mockCtrl = gomock.NewController(t)
	storeMock := NewMockStore(mockCtrl)
	_ = mockParseStoreReference(storeMock, string(data))
	_, _ = istorage.Transport.ParseStoreReference(storeMock, string(data))
	return 1
}
// catchPanics recovers from a panic raised by the fuzz target. Panics that
// originate from FuzzTester.Fatal/Fatalf (marked with "Fatal panic from
// fuzzer") are swallowed; any other panic is re-raised so genuine crashes
// still surface. It only has an effect when invoked via defer.
func catchPanics() {
	r := recover()
	if r == nil {
		return
	}
	// Normalize the recovered value into a string message. The
	// runtime.Error case must precede the generic error case, since
	// runtime errors also implement error.
	var err string
	switch v := r.(type) {
	case string:
		err = v
	case runtime.Error:
		err = v.Error()
	case error:
		err = v.Error()
	}
	if !strings.Contains(err, "Fatal panic from fuzzer") {
		panic(err)
	}
}
// FuzzTester implements enough of gomock's TestReporter/TestHelper surface
// (mirroring testing.TB) to drive gomock outside of a real `go test` run.
// Fatal conditions panic with a marker string which catchPanics recognizes
// and swallows.
type FuzzTester struct {
}

// Cleanup is a no-op for fuzzing.
func (ft *FuzzTester) Cleanup(func()) {}

// Setenv is a no-op for fuzzing.
func (ft *FuzzTester) Setenv(key, value string) {}

// Error is a no-op for fuzzing.
func (ft *FuzzTester) Error(args ...interface{}) {}

// Errorf is a no-op for fuzzing.
func (ft *FuzzTester) Errorf(format string, args ...interface{}) {}

// Fail is a no-op for fuzzing.
func (ft *FuzzTester) Fail() {}

// FailNow is a no-op for fuzzing.
func (ft *FuzzTester) FailNow() {}

// Failed always reports true.
func (ft *FuzzTester) Failed() bool { return true }

// Fatal panics with the marker recognized by catchPanics.
func (ft *FuzzTester) Fatal(args ...interface{}) { panic("Fatal panic from fuzzer") }

// Fatalf panics with the marker recognized by catchPanics, now including the
// formatted message for easier debugging (the previous code passed a
// constant string through fmt.Sprintf and dropped format and args entirely,
// a `go vet`/staticcheck S1039 finding).
func (ft *FuzzTester) Fatalf(format string, args ...interface{}) {
	panic(fmt.Sprintf("Fatal panic from fuzzer: "+format, args...))
}

// Helper is a no-op for fuzzing.
func (ft *FuzzTester) Helper() {}

// Log is a no-op for fuzzing.
func (ft *FuzzTester) Log(args ...interface{}) {}

// Logf is a no-op for fuzzing.
func (ft *FuzzTester) Logf(format string, args ...interface{}) {}

// Name returns a static test name.
func (ft *FuzzTester) Name() string { return "fuzz" }

// Parallel is a no-op for fuzzing.
func (ft *FuzzTester) Parallel() {}

// Skip is a no-op for fuzzing.
func (ft *FuzzTester) Skip(args ...interface{}) {}

// SkipNow is a no-op for fuzzing.
func (ft *FuzzTester) SkipNow() {}

// Skipf is a no-op for fuzzing.
func (ft *FuzzTester) Skipf(format string, args ...interface{}) {}

// Skipped always reports true.
func (ft *FuzzTester) Skipped() bool { return true }

// TempDir returns a static temporary directory.
func (ft *FuzzTester) TempDir() string { return "/tmp" }
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/containers/storage (interfaces: Store)
//
// Generated by this command:
//
// mockgen -package containerstoragemock -destination ./test/mocks/containerstorage/containerstorage.go github.com/containers/storage Store
//
// Package containerstoragemock is a generated GoMock package.
package containerstoragemock
import (
io "io"
reflect "reflect"
storage "github.com/containers/storage"
graphdriver "github.com/containers/storage/drivers"
archive "github.com/containers/storage/pkg/archive"
idtools "github.com/containers/storage/pkg/idtools"
digest "github.com/opencontainers/go-digest"
gomock "go.uber.org/mock/gomock"
)
// MockStore is a mock of Store interface.
type MockStore struct {
ctrl *gomock.Controller
recorder *MockStoreMockRecorder
isgomock struct{}
}
// MockStoreMockRecorder is the mock recorder for MockStore.
type MockStoreMockRecorder struct {
mock *MockStore
}
// NewMockStore creates a new mock instance.
func NewMockStore(ctrl *gomock.Controller) *MockStore {
mock := &MockStore{ctrl: ctrl}
mock.recorder = &MockStoreMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockStore) EXPECT() *MockStoreMockRecorder {
return m.recorder
}
// NOTE(review): MockGen-generated code — do not hand-edit; regenerate instead.
// Each mocked method records the call on the controller and type-asserts the
// configured return values; the ignored ok of each assertion means an
// unconfigured or mistyped return yields the zero value rather than panicking
// at the assertion site (gomock itself reports missing expectations).

// AddNames mocks base method.
func (m *MockStore) AddNames(id string, names []string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "AddNames", id, names)
	ret0, _ := ret[0].(error)
	return ret0
}

// AddNames indicates an expected call of AddNames.
func (mr *MockStoreMockRecorder) AddNames(id, names any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddNames", reflect.TypeOf((*MockStore)(nil).AddNames), id, names)
}

// ApplyDiff mocks base method.
func (m *MockStore) ApplyDiff(to string, diff io.Reader) (int64, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ApplyDiff", to, diff)
	ret0, _ := ret[0].(int64)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ApplyDiff indicates an expected call of ApplyDiff.
func (mr *MockStoreMockRecorder) ApplyDiff(to, diff any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyDiff", reflect.TypeOf((*MockStore)(nil).ApplyDiff), to, diff)
}

// ApplyStagedLayer mocks base method.
func (m *MockStore) ApplyStagedLayer(args storage.ApplyStagedLayerOptions) (*storage.Layer, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ApplyStagedLayer", args)
	ret0, _ := ret[0].(*storage.Layer)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ApplyStagedLayer indicates an expected call of ApplyStagedLayer.
func (mr *MockStoreMockRecorder) ApplyStagedLayer(args any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyStagedLayer", reflect.TypeOf((*MockStore)(nil).ApplyStagedLayer), args)
}

// Changes mocks base method.
func (m *MockStore) Changes(from, to string) ([]archive.Change, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Changes", from, to)
	ret0, _ := ret[0].([]archive.Change)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Changes indicates an expected call of Changes.
func (mr *MockStoreMockRecorder) Changes(from, to any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Changes", reflect.TypeOf((*MockStore)(nil).Changes), from, to)
}

// Check mocks base method.
func (m *MockStore) Check(options *storage.CheckOptions) (storage.CheckReport, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Check", options)
	ret0, _ := ret[0].(storage.CheckReport)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Check indicates an expected call of Check.
func (mr *MockStoreMockRecorder) Check(options any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Check", reflect.TypeOf((*MockStore)(nil).Check), options)
}
// NOTE(review): MockGen-generated code — do not hand-edit; regenerate instead.

// CleanupStagedLayer mocks base method.
func (m *MockStore) CleanupStagedLayer(diffOutput *graphdriver.DriverWithDifferOutput) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "CleanupStagedLayer", diffOutput)
	ret0, _ := ret[0].(error)
	return ret0
}

// CleanupStagedLayer indicates an expected call of CleanupStagedLayer.
func (mr *MockStoreMockRecorder) CleanupStagedLayer(diffOutput any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupStagedLayer", reflect.TypeOf((*MockStore)(nil).CleanupStagedLayer), diffOutput)
}

// Container mocks base method.
func (m *MockStore) Container(id string) (*storage.Container, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Container", id)
	ret0, _ := ret[0].(*storage.Container)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Container indicates an expected call of Container.
func (mr *MockStoreMockRecorder) Container(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Container", reflect.TypeOf((*MockStore)(nil).Container), id)
}

// ContainerBigData mocks base method.
func (m *MockStore) ContainerBigData(id, key string) ([]byte, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ContainerBigData", id, key)
	ret0, _ := ret[0].([]byte)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ContainerBigData indicates an expected call of ContainerBigData.
func (mr *MockStoreMockRecorder) ContainerBigData(id, key any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerBigData", reflect.TypeOf((*MockStore)(nil).ContainerBigData), id, key)
}

// ContainerBigDataDigest mocks base method.
func (m *MockStore) ContainerBigDataDigest(id, key string) (digest.Digest, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ContainerBigDataDigest", id, key)
	ret0, _ := ret[0].(digest.Digest)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ContainerBigDataDigest indicates an expected call of ContainerBigDataDigest.
func (mr *MockStoreMockRecorder) ContainerBigDataDigest(id, key any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerBigDataDigest", reflect.TypeOf((*MockStore)(nil).ContainerBigDataDigest), id, key)
}

// ContainerBigDataSize mocks base method.
func (m *MockStore) ContainerBigDataSize(id, key string) (int64, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ContainerBigDataSize", id, key)
	ret0, _ := ret[0].(int64)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ContainerBigDataSize indicates an expected call of ContainerBigDataSize.
func (mr *MockStoreMockRecorder) ContainerBigDataSize(id, key any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerBigDataSize", reflect.TypeOf((*MockStore)(nil).ContainerBigDataSize), id, key)
}

// ContainerByLayer mocks base method.
func (m *MockStore) ContainerByLayer(id string) (*storage.Container, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ContainerByLayer", id)
	ret0, _ := ret[0].(*storage.Container)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ContainerByLayer indicates an expected call of ContainerByLayer.
func (mr *MockStoreMockRecorder) ContainerByLayer(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerByLayer", reflect.TypeOf((*MockStore)(nil).ContainerByLayer), id)
}

// ContainerDirectory mocks base method.
func (m *MockStore) ContainerDirectory(id string) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ContainerDirectory", id)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ContainerDirectory indicates an expected call of ContainerDirectory.
func (mr *MockStoreMockRecorder) ContainerDirectory(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerDirectory", reflect.TypeOf((*MockStore)(nil).ContainerDirectory), id)
}

// ContainerParentOwners mocks base method.
func (m *MockStore) ContainerParentOwners(id string) ([]int, []int, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ContainerParentOwners", id)
	ret0, _ := ret[0].([]int)
	ret1, _ := ret[1].([]int)
	ret2, _ := ret[2].(error)
	return ret0, ret1, ret2
}

// ContainerParentOwners indicates an expected call of ContainerParentOwners.
func (mr *MockStoreMockRecorder) ContainerParentOwners(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerParentOwners", reflect.TypeOf((*MockStore)(nil).ContainerParentOwners), id)
}

// ContainerRunDirectory mocks base method.
func (m *MockStore) ContainerRunDirectory(id string) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ContainerRunDirectory", id)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ContainerRunDirectory indicates an expected call of ContainerRunDirectory.
func (mr *MockStoreMockRecorder) ContainerRunDirectory(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerRunDirectory", reflect.TypeOf((*MockStore)(nil).ContainerRunDirectory), id)
}

// ContainerSize mocks base method.
func (m *MockStore) ContainerSize(id string) (int64, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ContainerSize", id)
	ret0, _ := ret[0].(int64)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ContainerSize indicates an expected call of ContainerSize.
func (mr *MockStoreMockRecorder) ContainerSize(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerSize", reflect.TypeOf((*MockStore)(nil).ContainerSize), id)
}

// Containers mocks base method.
func (m *MockStore) Containers() ([]storage.Container, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Containers")
	ret0, _ := ret[0].([]storage.Container)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Containers indicates an expected call of Containers.
func (mr *MockStoreMockRecorder) Containers() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Containers", reflect.TypeOf((*MockStore)(nil).Containers))
}
// NOTE(review): MockGen-generated code — do not hand-edit; regenerate instead.

// CreateContainer mocks base method.
func (m *MockStore) CreateContainer(id string, names []string, image, layer, metadata string, options *storage.ContainerOptions) (*storage.Container, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "CreateContainer", id, names, image, layer, metadata, options)
	ret0, _ := ret[0].(*storage.Container)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// CreateContainer indicates an expected call of CreateContainer.
func (mr *MockStoreMockRecorder) CreateContainer(id, names, image, layer, metadata, options any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateContainer", reflect.TypeOf((*MockStore)(nil).CreateContainer), id, names, image, layer, metadata, options)
}

// CreateImage mocks base method.
func (m *MockStore) CreateImage(id string, names []string, layer, metadata string, options *storage.ImageOptions) (*storage.Image, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "CreateImage", id, names, layer, metadata, options)
	ret0, _ := ret[0].(*storage.Image)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// CreateImage indicates an expected call of CreateImage.
func (mr *MockStoreMockRecorder) CreateImage(id, names, layer, metadata, options any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateImage", reflect.TypeOf((*MockStore)(nil).CreateImage), id, names, layer, metadata, options)
}

// CreateLayer mocks base method.
func (m *MockStore) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *storage.LayerOptions) (*storage.Layer, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "CreateLayer", id, parent, names, mountLabel, writeable, options)
	ret0, _ := ret[0].(*storage.Layer)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// CreateLayer indicates an expected call of CreateLayer.
func (mr *MockStoreMockRecorder) CreateLayer(id, parent, names, mountLabel, writeable, options any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateLayer", reflect.TypeOf((*MockStore)(nil).CreateLayer), id, parent, names, mountLabel, writeable, options)
}

// Dedup mocks base method.
func (m *MockStore) Dedup(arg0 storage.DedupArgs) (graphdriver.DedupResult, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Dedup", arg0)
	ret0, _ := ret[0].(graphdriver.DedupResult)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Dedup indicates an expected call of Dedup.
func (mr *MockStoreMockRecorder) Dedup(arg0 any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Dedup", reflect.TypeOf((*MockStore)(nil).Dedup), arg0)
}

// Delete mocks base method.
func (m *MockStore) Delete(id string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Delete", id)
	ret0, _ := ret[0].(error)
	return ret0
}

// Delete indicates an expected call of Delete.
func (mr *MockStoreMockRecorder) Delete(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockStore)(nil).Delete), id)
}

// DeleteContainer mocks base method.
func (m *MockStore) DeleteContainer(id string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DeleteContainer", id)
	ret0, _ := ret[0].(error)
	return ret0
}

// DeleteContainer indicates an expected call of DeleteContainer.
func (mr *MockStoreMockRecorder) DeleteContainer(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteContainer", reflect.TypeOf((*MockStore)(nil).DeleteContainer), id)
}

// DeleteImage mocks base method.
func (m *MockStore) DeleteImage(id string, commit bool) ([]string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DeleteImage", id, commit)
	ret0, _ := ret[0].([]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// DeleteImage indicates an expected call of DeleteImage.
func (mr *MockStoreMockRecorder) DeleteImage(id, commit any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteImage", reflect.TypeOf((*MockStore)(nil).DeleteImage), id, commit)
}

// DeleteLayer mocks base method.
func (m *MockStore) DeleteLayer(id string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DeleteLayer", id)
	ret0, _ := ret[0].(error)
	return ret0
}

// DeleteLayer indicates an expected call of DeleteLayer.
func (mr *MockStoreMockRecorder) DeleteLayer(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLayer", reflect.TypeOf((*MockStore)(nil).DeleteLayer), id)
}
// NOTE(review): MockGen-generated code — do not hand-edit; regenerate instead.

// Diff mocks base method.
func (m *MockStore) Diff(from, to string, options *storage.DiffOptions) (io.ReadCloser, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Diff", from, to, options)
	ret0, _ := ret[0].(io.ReadCloser)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Diff indicates an expected call of Diff.
func (mr *MockStoreMockRecorder) Diff(from, to, options any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Diff", reflect.TypeOf((*MockStore)(nil).Diff), from, to, options)
}

// DiffSize mocks base method.
func (m *MockStore) DiffSize(from, to string) (int64, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DiffSize", from, to)
	ret0, _ := ret[0].(int64)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// DiffSize indicates an expected call of DiffSize.
func (mr *MockStoreMockRecorder) DiffSize(from, to any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DiffSize", reflect.TypeOf((*MockStore)(nil).DiffSize), from, to)
}

// DifferTarget mocks base method.
func (m *MockStore) DifferTarget(id string) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DifferTarget", id)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// DifferTarget indicates an expected call of DifferTarget.
func (mr *MockStoreMockRecorder) DifferTarget(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DifferTarget", reflect.TypeOf((*MockStore)(nil).DifferTarget), id)
}

// Exists mocks base method.
func (m *MockStore) Exists(id string) bool {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Exists", id)
	ret0, _ := ret[0].(bool)
	return ret0
}

// Exists indicates an expected call of Exists.
func (mr *MockStoreMockRecorder) Exists(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exists", reflect.TypeOf((*MockStore)(nil).Exists), id)
}

// Free mocks base method.
func (m *MockStore) Free() {
	m.ctrl.T.Helper()
	m.ctrl.Call(m, "Free")
}

// Free indicates an expected call of Free.
func (mr *MockStoreMockRecorder) Free() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Free", reflect.TypeOf((*MockStore)(nil).Free))
}

// FromContainerDirectory mocks base method.
func (m *MockStore) FromContainerDirectory(id, file string) ([]byte, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "FromContainerDirectory", id, file)
	ret0, _ := ret[0].([]byte)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// FromContainerDirectory indicates an expected call of FromContainerDirectory.
func (mr *MockStoreMockRecorder) FromContainerDirectory(id, file any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FromContainerDirectory", reflect.TypeOf((*MockStore)(nil).FromContainerDirectory), id, file)
}

// FromContainerRunDirectory mocks base method.
func (m *MockStore) FromContainerRunDirectory(id, file string) ([]byte, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "FromContainerRunDirectory", id, file)
	ret0, _ := ret[0].([]byte)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// FromContainerRunDirectory indicates an expected call of FromContainerRunDirectory.
func (mr *MockStoreMockRecorder) FromContainerRunDirectory(id, file any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FromContainerRunDirectory", reflect.TypeOf((*MockStore)(nil).FromContainerRunDirectory), id, file)
}

// GIDMap mocks base method.
func (m *MockStore) GIDMap() []idtools.IDMap {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GIDMap")
	ret0, _ := ret[0].([]idtools.IDMap)
	return ret0
}

// GIDMap indicates an expected call of GIDMap.
func (mr *MockStoreMockRecorder) GIDMap() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GIDMap", reflect.TypeOf((*MockStore)(nil).GIDMap))
}

// GarbageCollect mocks base method.
func (m *MockStore) GarbageCollect() error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GarbageCollect")
	ret0, _ := ret[0].(error)
	return ret0
}

// GarbageCollect indicates an expected call of GarbageCollect.
func (mr *MockStoreMockRecorder) GarbageCollect() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GarbageCollect", reflect.TypeOf((*MockStore)(nil).GarbageCollect))
}

// GetDigestLock mocks base method.
func (m *MockStore) GetDigestLock(arg0 digest.Digest) (storage.Locker, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetDigestLock", arg0)
	ret0, _ := ret[0].(storage.Locker)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetDigestLock indicates an expected call of GetDigestLock.
func (mr *MockStoreMockRecorder) GetDigestLock(arg0 any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDigestLock", reflect.TypeOf((*MockStore)(nil).GetDigestLock), arg0)
}
// NOTE(review): MockGen-generated code — do not hand-edit; regenerate instead.

// GraphDriver mocks base method.
func (m *MockStore) GraphDriver() (graphdriver.Driver, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GraphDriver")
	ret0, _ := ret[0].(graphdriver.Driver)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GraphDriver indicates an expected call of GraphDriver.
func (mr *MockStoreMockRecorder) GraphDriver() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GraphDriver", reflect.TypeOf((*MockStore)(nil).GraphDriver))
}

// GraphDriverName mocks base method.
func (m *MockStore) GraphDriverName() string {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GraphDriverName")
	ret0, _ := ret[0].(string)
	return ret0
}

// GraphDriverName indicates an expected call of GraphDriverName.
func (mr *MockStoreMockRecorder) GraphDriverName() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GraphDriverName", reflect.TypeOf((*MockStore)(nil).GraphDriverName))
}

// GraphOptions mocks base method.
func (m *MockStore) GraphOptions() []string {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GraphOptions")
	ret0, _ := ret[0].([]string)
	return ret0
}

// GraphOptions indicates an expected call of GraphOptions.
func (mr *MockStoreMockRecorder) GraphOptions() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GraphOptions", reflect.TypeOf((*MockStore)(nil).GraphOptions))
}

// GraphRoot mocks base method.
func (m *MockStore) GraphRoot() string {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GraphRoot")
	ret0, _ := ret[0].(string)
	return ret0
}

// GraphRoot indicates an expected call of GraphRoot.
func (mr *MockStoreMockRecorder) GraphRoot() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GraphRoot", reflect.TypeOf((*MockStore)(nil).GraphRoot))
}

// Image mocks base method.
func (m *MockStore) Image(id string) (*storage.Image, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Image", id)
	ret0, _ := ret[0].(*storage.Image)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Image indicates an expected call of Image.
func (mr *MockStoreMockRecorder) Image(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Image", reflect.TypeOf((*MockStore)(nil).Image), id)
}

// ImageBigData mocks base method.
func (m *MockStore) ImageBigData(id, key string) ([]byte, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ImageBigData", id, key)
	ret0, _ := ret[0].([]byte)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ImageBigData indicates an expected call of ImageBigData.
func (mr *MockStoreMockRecorder) ImageBigData(id, key any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageBigData", reflect.TypeOf((*MockStore)(nil).ImageBigData), id, key)
}

// ImageBigDataDigest mocks base method.
func (m *MockStore) ImageBigDataDigest(id, key string) (digest.Digest, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ImageBigDataDigest", id, key)
	ret0, _ := ret[0].(digest.Digest)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ImageBigDataDigest indicates an expected call of ImageBigDataDigest.
func (mr *MockStoreMockRecorder) ImageBigDataDigest(id, key any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageBigDataDigest", reflect.TypeOf((*MockStore)(nil).ImageBigDataDigest), id, key)
}

// ImageBigDataSize mocks base method.
func (m *MockStore) ImageBigDataSize(id, key string) (int64, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ImageBigDataSize", id, key)
	ret0, _ := ret[0].(int64)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ImageBigDataSize indicates an expected call of ImageBigDataSize.
func (mr *MockStoreMockRecorder) ImageBigDataSize(id, key any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageBigDataSize", reflect.TypeOf((*MockStore)(nil).ImageBigDataSize), id, key)
}
// NOTE(review): MockGen-generated code — do not hand-edit; regenerate instead.

// ImageDirectory mocks base method.
func (m *MockStore) ImageDirectory(id string) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ImageDirectory", id)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ImageDirectory indicates an expected call of ImageDirectory.
func (mr *MockStoreMockRecorder) ImageDirectory(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageDirectory", reflect.TypeOf((*MockStore)(nil).ImageDirectory), id)
}

// ImageRunDirectory mocks base method.
func (m *MockStore) ImageRunDirectory(id string) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ImageRunDirectory", id)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ImageRunDirectory indicates an expected call of ImageRunDirectory.
func (mr *MockStoreMockRecorder) ImageRunDirectory(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageRunDirectory", reflect.TypeOf((*MockStore)(nil).ImageRunDirectory), id)
}

// ImageSize mocks base method.
func (m *MockStore) ImageSize(id string) (int64, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ImageSize", id)
	ret0, _ := ret[0].(int64)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ImageSize indicates an expected call of ImageSize.
func (mr *MockStoreMockRecorder) ImageSize(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageSize", reflect.TypeOf((*MockStore)(nil).ImageSize), id)
}

// ImageStore mocks base method.
func (m *MockStore) ImageStore() string {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ImageStore")
	ret0, _ := ret[0].(string)
	return ret0
}

// ImageStore indicates an expected call of ImageStore.
func (mr *MockStoreMockRecorder) ImageStore() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageStore", reflect.TypeOf((*MockStore)(nil).ImageStore))
}

// Images mocks base method.
func (m *MockStore) Images() ([]storage.Image, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Images")
	ret0, _ := ret[0].([]storage.Image)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Images indicates an expected call of Images.
func (mr *MockStoreMockRecorder) Images() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Images", reflect.TypeOf((*MockStore)(nil).Images))
}

// ImagesByDigest mocks base method.
func (m *MockStore) ImagesByDigest(d digest.Digest) ([]*storage.Image, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ImagesByDigest", d)
	ret0, _ := ret[0].([]*storage.Image)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ImagesByDigest indicates an expected call of ImagesByDigest.
func (mr *MockStoreMockRecorder) ImagesByDigest(d any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImagesByDigest", reflect.TypeOf((*MockStore)(nil).ImagesByDigest), d)
}

// ImagesByTopLayer mocks base method.
func (m *MockStore) ImagesByTopLayer(id string) ([]*storage.Image, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ImagesByTopLayer", id)
	ret0, _ := ret[0].([]*storage.Image)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ImagesByTopLayer indicates an expected call of ImagesByTopLayer.
func (mr *MockStoreMockRecorder) ImagesByTopLayer(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImagesByTopLayer", reflect.TypeOf((*MockStore)(nil).ImagesByTopLayer), id)
}

// Layer mocks base method.
func (m *MockStore) Layer(id string) (*storage.Layer, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Layer", id)
	ret0, _ := ret[0].(*storage.Layer)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Layer indicates an expected call of Layer.
func (mr *MockStoreMockRecorder) Layer(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Layer", reflect.TypeOf((*MockStore)(nil).Layer), id)
}

// LayerBigData mocks base method.
func (m *MockStore) LayerBigData(id, key string) (io.ReadCloser, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "LayerBigData", id, key)
	ret0, _ := ret[0].(io.ReadCloser)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// LayerBigData indicates an expected call of LayerBigData.
func (mr *MockStoreMockRecorder) LayerBigData(id, key any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LayerBigData", reflect.TypeOf((*MockStore)(nil).LayerBigData), id, key)
}

// LayerParentOwners mocks base method.
func (m *MockStore) LayerParentOwners(id string) ([]int, []int, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "LayerParentOwners", id)
	ret0, _ := ret[0].([]int)
	ret1, _ := ret[1].([]int)
	ret2, _ := ret[2].(error)
	return ret0, ret1, ret2
}

// LayerParentOwners indicates an expected call of LayerParentOwners.
func (mr *MockStoreMockRecorder) LayerParentOwners(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LayerParentOwners", reflect.TypeOf((*MockStore)(nil).LayerParentOwners), id)
}
// NOTE(review): MockGen-generated code — do not hand-edit; regenerate instead.

// LayerSize mocks base method.
func (m *MockStore) LayerSize(id string) (int64, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "LayerSize", id)
	ret0, _ := ret[0].(int64)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// LayerSize indicates an expected call of LayerSize.
func (mr *MockStoreMockRecorder) LayerSize(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LayerSize", reflect.TypeOf((*MockStore)(nil).LayerSize), id)
}

// Layers mocks base method.
func (m *MockStore) Layers() ([]storage.Layer, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Layers")
	ret0, _ := ret[0].([]storage.Layer)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Layers indicates an expected call of Layers.
func (mr *MockStoreMockRecorder) Layers() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Layers", reflect.TypeOf((*MockStore)(nil).Layers))
}

// LayersByCompressedDigest mocks base method.
func (m *MockStore) LayersByCompressedDigest(d digest.Digest) ([]storage.Layer, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "LayersByCompressedDigest", d)
	ret0, _ := ret[0].([]storage.Layer)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// LayersByCompressedDigest indicates an expected call of LayersByCompressedDigest.
func (mr *MockStoreMockRecorder) LayersByCompressedDigest(d any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LayersByCompressedDigest", reflect.TypeOf((*MockStore)(nil).LayersByCompressedDigest), d)
}

// LayersByTOCDigest mocks base method.
func (m *MockStore) LayersByTOCDigest(d digest.Digest) ([]storage.Layer, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "LayersByTOCDigest", d)
	ret0, _ := ret[0].([]storage.Layer)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// LayersByTOCDigest indicates an expected call of LayersByTOCDigest.
func (mr *MockStoreMockRecorder) LayersByTOCDigest(d any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LayersByTOCDigest", reflect.TypeOf((*MockStore)(nil).LayersByTOCDigest), d)
}

// LayersByUncompressedDigest mocks base method.
func (m *MockStore) LayersByUncompressedDigest(d digest.Digest) ([]storage.Layer, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "LayersByUncompressedDigest", d)
	ret0, _ := ret[0].([]storage.Layer)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// LayersByUncompressedDigest indicates an expected call of LayersByUncompressedDigest.
func (mr *MockStoreMockRecorder) LayersByUncompressedDigest(d any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LayersByUncompressedDigest", reflect.TypeOf((*MockStore)(nil).LayersByUncompressedDigest), d)
}

// ListContainerBigData mocks base method.
func (m *MockStore) ListContainerBigData(id string) ([]string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ListContainerBigData", id)
	ret0, _ := ret[0].([]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ListContainerBigData indicates an expected call of ListContainerBigData.
func (mr *MockStoreMockRecorder) ListContainerBigData(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListContainerBigData", reflect.TypeOf((*MockStore)(nil).ListContainerBigData), id)
}

// ListImageBigData mocks base method.
func (m *MockStore) ListImageBigData(id string) ([]string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ListImageBigData", id)
	ret0, _ := ret[0].([]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ListImageBigData indicates an expected call of ListImageBigData.
func (mr *MockStoreMockRecorder) ListImageBigData(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListImageBigData", reflect.TypeOf((*MockStore)(nil).ListImageBigData), id)
}

// ListLayerBigData mocks base method.
func (m *MockStore) ListLayerBigData(id string) ([]string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ListLayerBigData", id)
	ret0, _ := ret[0].([]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ListLayerBigData indicates an expected call of ListLayerBigData.
func (mr *MockStoreMockRecorder) ListLayerBigData(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListLayerBigData", reflect.TypeOf((*MockStore)(nil).ListLayerBigData), id)
}
// NOTE(review): MockGen-generated code — do not hand-edit; regenerate instead.

// Lookup mocks base method.
func (m *MockStore) Lookup(name string) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Lookup", name)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Lookup indicates an expected call of Lookup.
func (mr *MockStoreMockRecorder) Lookup(name any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lookup", reflect.TypeOf((*MockStore)(nil).Lookup), name)
}

// LookupAdditionalLayer mocks base method.
func (m *MockStore) LookupAdditionalLayer(tocDigest digest.Digest, imageref string) (storage.AdditionalLayer, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "LookupAdditionalLayer", tocDigest, imageref)
	ret0, _ := ret[0].(storage.AdditionalLayer)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// LookupAdditionalLayer indicates an expected call of LookupAdditionalLayer.
func (mr *MockStoreMockRecorder) LookupAdditionalLayer(tocDigest, imageref any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LookupAdditionalLayer", reflect.TypeOf((*MockStore)(nil).LookupAdditionalLayer), tocDigest, imageref)
}

// Metadata mocks base method.
func (m *MockStore) Metadata(id string) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Metadata", id)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Metadata indicates an expected call of Metadata.
func (mr *MockStoreMockRecorder) Metadata(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Metadata", reflect.TypeOf((*MockStore)(nil).Metadata), id)
}

// Mount mocks base method.
func (m *MockStore) Mount(id, mountLabel string) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Mount", id, mountLabel)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Mount indicates an expected call of Mount.
func (mr *MockStoreMockRecorder) Mount(id, mountLabel any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Mount", reflect.TypeOf((*MockStore)(nil).Mount), id, mountLabel)
}

// MountImage mocks base method.
func (m *MockStore) MountImage(id string, mountOptions []string, mountLabel string) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "MountImage", id, mountOptions, mountLabel)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// MountImage indicates an expected call of MountImage.
func (mr *MockStoreMockRecorder) MountImage(id, mountOptions, mountLabel any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MountImage", reflect.TypeOf((*MockStore)(nil).MountImage), id, mountOptions, mountLabel)
}

// Mounted mocks base method.
func (m *MockStore) Mounted(id string) (int, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Mounted", id)
	ret0, _ := ret[0].(int)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Mounted indicates an expected call of Mounted.
func (mr *MockStoreMockRecorder) Mounted(id any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Mounted", reflect.TypeOf((*MockStore)(nil).Mounted), id)
}
// MultiList mocks base method.
func (m *MockStore) MultiList(arg0 storage.MultiListOptions) (storage.MultiListResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "MultiList", arg0)
ret0, _ := ret[0].(storage.MultiListResult)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// MultiList indicates an expected call of MultiList.
func (mr *MockStoreMockRecorder) MultiList(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MultiList", reflect.TypeOf((*MockStore)(nil).MultiList), arg0)
}
// Names mocks base method.
func (m *MockStore) Names(id string) ([]string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Names", id)
ret0, _ := ret[0].([]string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Names indicates an expected call of Names.
func (mr *MockStoreMockRecorder) Names(id any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Names", reflect.TypeOf((*MockStore)(nil).Names), id)
}
// PrepareStagedLayer mocks base method.
func (m *MockStore) PrepareStagedLayer(options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (*graphdriver.DriverWithDifferOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PrepareStagedLayer", options, differ)
ret0, _ := ret[0].(*graphdriver.DriverWithDifferOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// PrepareStagedLayer indicates an expected call of PrepareStagedLayer.
func (mr *MockStoreMockRecorder) PrepareStagedLayer(options, differ any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareStagedLayer", reflect.TypeOf((*MockStore)(nil).PrepareStagedLayer), options, differ)
}
// PullOptions mocks base method.
func (m *MockStore) PullOptions() map[string]string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PullOptions")
ret0, _ := ret[0].(map[string]string)
return ret0
}
// PullOptions indicates an expected call of PullOptions.
func (mr *MockStoreMockRecorder) PullOptions() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullOptions", reflect.TypeOf((*MockStore)(nil).PullOptions))
}
// NOTE(review): generated mock code continues below (mockgen pattern);
// regenerate rather than hand-edit.

// PutLayer mocks base method.
func (m *MockStore) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *storage.LayerOptions, diff io.Reader) (*storage.Layer, int64, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PutLayer", id, parent, names, mountLabel, writeable, options, diff)
	ret0, _ := ret[0].(*storage.Layer)
	ret1, _ := ret[1].(int64)
	ret2, _ := ret[2].(error)
	return ret0, ret1, ret2
}

// PutLayer indicates an expected call of PutLayer.
func (mr *MockStoreMockRecorder) PutLayer(id, parent, names, mountLabel, writeable, options, diff any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutLayer", reflect.TypeOf((*MockStore)(nil).PutLayer), id, parent, names, mountLabel, writeable, options, diff)
}

// RemoveNames mocks base method.
func (m *MockStore) RemoveNames(id string, names []string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "RemoveNames", id, names)
	ret0, _ := ret[0].(error)
	return ret0
}

// RemoveNames indicates an expected call of RemoveNames.
func (mr *MockStoreMockRecorder) RemoveNames(id, names any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveNames", reflect.TypeOf((*MockStore)(nil).RemoveNames), id, names)
}

// Repair mocks base method.
func (m *MockStore) Repair(report storage.CheckReport, options *storage.RepairOptions) []error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Repair", report, options)
	ret0, _ := ret[0].([]error)
	return ret0
}

// Repair indicates an expected call of Repair.
func (mr *MockStoreMockRecorder) Repair(report, options any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Repair", reflect.TypeOf((*MockStore)(nil).Repair), report, options)
}

// RunRoot mocks base method.
func (m *MockStore) RunRoot() string {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "RunRoot")
	ret0, _ := ret[0].(string)
	return ret0
}

// RunRoot indicates an expected call of RunRoot.
func (mr *MockStoreMockRecorder) RunRoot() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunRoot", reflect.TypeOf((*MockStore)(nil).RunRoot))
}

// SetContainerBigData mocks base method.
func (m *MockStore) SetContainerBigData(id, key string, data []byte) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SetContainerBigData", id, key, data)
	ret0, _ := ret[0].(error)
	return ret0
}

// SetContainerBigData indicates an expected call of SetContainerBigData.
func (mr *MockStoreMockRecorder) SetContainerBigData(id, key, data any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetContainerBigData", reflect.TypeOf((*MockStore)(nil).SetContainerBigData), id, key, data)
}

// SetContainerDirectoryFile mocks base method.
func (m *MockStore) SetContainerDirectoryFile(id, file string, data []byte) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SetContainerDirectoryFile", id, file, data)
	ret0, _ := ret[0].(error)
	return ret0
}

// SetContainerDirectoryFile indicates an expected call of SetContainerDirectoryFile.
func (mr *MockStoreMockRecorder) SetContainerDirectoryFile(id, file, data any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetContainerDirectoryFile", reflect.TypeOf((*MockStore)(nil).SetContainerDirectoryFile), id, file, data)
}

// SetContainerRunDirectoryFile mocks base method.
func (m *MockStore) SetContainerRunDirectoryFile(id, file string, data []byte) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SetContainerRunDirectoryFile", id, file, data)
	ret0, _ := ret[0].(error)
	return ret0
}

// SetContainerRunDirectoryFile indicates an expected call of SetContainerRunDirectoryFile.
func (mr *MockStoreMockRecorder) SetContainerRunDirectoryFile(id, file, data any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetContainerRunDirectoryFile", reflect.TypeOf((*MockStore)(nil).SetContainerRunDirectoryFile), id, file, data)
}

// SetImageBigData mocks base method.
func (m *MockStore) SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SetImageBigData", id, key, data, digestManifest)
	ret0, _ := ret[0].(error)
	return ret0
}

// SetImageBigData indicates an expected call of SetImageBigData.
func (mr *MockStoreMockRecorder) SetImageBigData(id, key, data, digestManifest any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetImageBigData", reflect.TypeOf((*MockStore)(nil).SetImageBigData), id, key, data, digestManifest)
}

// SetLayerBigData mocks base method.
func (m *MockStore) SetLayerBigData(id, key string, data io.Reader) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SetLayerBigData", id, key, data)
	ret0, _ := ret[0].(error)
	return ret0
}

// SetLayerBigData indicates an expected call of SetLayerBigData.
func (mr *MockStoreMockRecorder) SetLayerBigData(id, key, data any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLayerBigData", reflect.TypeOf((*MockStore)(nil).SetLayerBigData), id, key, data)
}

// SetMetadata mocks base method.
func (m *MockStore) SetMetadata(id, metadata string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SetMetadata", id, metadata)
	ret0, _ := ret[0].(error)
	return ret0
}

// SetMetadata indicates an expected call of SetMetadata.
func (mr *MockStoreMockRecorder) SetMetadata(id, metadata any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMetadata", reflect.TypeOf((*MockStore)(nil).SetMetadata), id, metadata)
}
// NOTE(review): generated mock code continues below (mockgen pattern);
// regenerate rather than hand-edit.

// SetNames mocks base method.
func (m *MockStore) SetNames(id string, names []string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SetNames", id, names)
	ret0, _ := ret[0].(error)
	return ret0
}

// SetNames indicates an expected call of SetNames.
func (mr *MockStoreMockRecorder) SetNames(id, names any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNames", reflect.TypeOf((*MockStore)(nil).SetNames), id, names)
}

// Shutdown mocks base method.
func (m *MockStore) Shutdown(force bool) ([]string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Shutdown", force)
	ret0, _ := ret[0].([]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Shutdown indicates an expected call of Shutdown.
func (mr *MockStoreMockRecorder) Shutdown(force any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockStore)(nil).Shutdown), force)
}

// Status mocks base method.
func (m *MockStore) Status() ([][2]string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Status")
	ret0, _ := ret[0].([][2]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Status indicates an expected call of Status.
func (mr *MockStoreMockRecorder) Status() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockStore)(nil).Status))
}

// TransientStore mocks base method.
func (m *MockStore) TransientStore() bool {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "TransientStore")
	ret0, _ := ret[0].(bool)
	return ret0
}

// TransientStore indicates an expected call of TransientStore.
func (mr *MockStoreMockRecorder) TransientStore() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TransientStore", reflect.TypeOf((*MockStore)(nil).TransientStore))
}

// UIDMap mocks base method.
func (m *MockStore) UIDMap() []idtools.IDMap {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "UIDMap")
	ret0, _ := ret[0].([]idtools.IDMap)
	return ret0
}

// UIDMap indicates an expected call of UIDMap.
func (mr *MockStoreMockRecorder) UIDMap() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UIDMap", reflect.TypeOf((*MockStore)(nil).UIDMap))
}

// Unmount mocks base method.
func (m *MockStore) Unmount(id string, force bool) (bool, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Unmount", id, force)
	ret0, _ := ret[0].(bool)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Unmount indicates an expected call of Unmount.
func (mr *MockStoreMockRecorder) Unmount(id, force any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unmount", reflect.TypeOf((*MockStore)(nil).Unmount), id, force)
}

// UnmountImage mocks base method.
func (m *MockStore) UnmountImage(id string, force bool) (bool, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "UnmountImage", id, force)
	ret0, _ := ret[0].(bool)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// UnmountImage indicates an expected call of UnmountImage.
func (mr *MockStoreMockRecorder) UnmountImage(id, force any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnmountImage", reflect.TypeOf((*MockStore)(nil).UnmountImage), id, force)
}

// Version mocks base method.
func (m *MockStore) Version() ([][2]string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Version")
	ret0, _ := ret[0].([][2]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Version indicates an expected call of Version.
func (mr *MockStoreMockRecorder) Version() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockStore)(nil).Version))
}

// Wipe mocks base method.
func (m *MockStore) Wipe() error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Wipe")
	ret0, _ := ret[0].(error)
	return ret0
}

// Wipe indicates an expected call of Wipe.
func (mr *MockStoreMockRecorder) Wipe() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Wipe", reflect.TypeOf((*MockStore)(nil).Wipe))
}
package containerstoragemock
import (
"fmt"
"strings"
cstorage "github.com/containers/storage"
//containerstoragemock "github.com/cri-o/cri-o/test/mocks/containerstorage"
"go.uber.org/mock/gomock"
//"github.com/golang/mock/gomock"
. "github.com/onsi/ginkgo/v2"
)
var (
	// testManifest is a minimal schema-1 manifest with a single layer; it is
	// served as canned ImageBigData content by the mock expectations below.
	testManifest = []byte(`{"schemaVersion": 1,"fsLayers":[{"blobSum": ""}],` +
		`"history": [{"v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef57` +
		`82a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"\"}\n"}]}`)
)
// mockSequence tracks the first and last call of an ordered run of mock
// expectations, so that sequences can be chained and nested by inOrder.
type mockSequence struct {
	first, last *gomock.Call // may be both nil (= the default value of mockSequence) to mean empty sequence
}
// inOrder chains the given calls and/or mockSequences so that each element
// must happen after the previous one; like gomock.InOrder, but nestable.
// Empty sequences are skipped. It returns the combined sequence, which may
// itself be empty.
func inOrder(calls ...any) mockSequence {
	var first, last *gomock.Call
	// This implementation does a few more assignments and checks than strictly necessary, but it is O(N) and reasonably easy to read, so, whatever.
	for _, call := range calls {
		var elem mockSequence
		switch e := call.(type) {
		case mockSequence:
			elem = e
		case *gomock.Call:
			elem = mockSequence{e, e}
		default:
			Fail(fmt.Sprintf("Invalid inOrder parameter %#v", e))
		}
		if elem.first == nil {
			// Empty sequence (or typed-nil *gomock.Call): contributes nothing.
			continue
		}
		if first == nil {
			first = elem.first
		} else if last != nil {
			elem.first.After(last)
		}
		last = elem.last
	}
	return mockSequence{first, last}
}
// containers/image/storage.storageReference.StringWithinTransport
// Registers, in order, the four store lookups (graph options, driver name,
// graph root, run root) that StringWithinTransport performs; the order of
// these expectations is the contract.
func mockStorageReferenceStringWithinTransport(storeMock *MockStore) mockSequence {
	return inOrder(
		storeMock.EXPECT().GraphOptions().Return([]string{}),
		storeMock.EXPECT().GraphDriverName().Return(""),
		storeMock.EXPECT().GraphRoot().Return(""),
		storeMock.EXPECT().RunRoot().Return(""),
	)
}
// containers/image/storage.Transport.ParseStoreReference
func mockParseStoreReference(storeMock *MockStore, expectedImageName string) mockSequence {
	// ParseStoreReference calls store.Image() to check whether short strings are possible prefixes of IDs of existing images
	// (either using the unambiguous "@idPrefix" syntax, or the ambiguous "idPrefix" syntax).
	// None of our tests use ID prefixes (only full IDs), so we can safely and correctly return ErrImageUnknown in all such cases;
	// it only matters that we include, or not, the mock expectation.
	//
	// This hard-codes a heuristic in ParseStoreReference for whether to consider expectedImageName a possible ID prefix.
	// The "@" check also happens to exclude full @digest values, which can happen but would not trigger a store.Image() lookup.
	var c1 *gomock.Call
	if len(expectedImageName) >= 3 && !strings.ContainsAny(expectedImageName, "@:") {
		c1 = storeMock.EXPECT().Image(expectedImageName).Return(nil, cstorage.ErrImageUnknown)
	}
	// When c1 stays nil, inOrder treats it as an empty element and skips it.
	return inOrder(
		c1,
		mockStorageReferenceStringWithinTransport(storeMock),
	)
}
// containers/image/storage.Transport.GetStoreImage
// expectedImageName must be in the fully normalized format (reference.Named.String())!
// resolvedImageID may be "" to simulate a missing image
func mockGetStoreImage(storeMock *MockStore, expectedImageName, resolvedImageID string) mockSequence {
	if resolvedImageID != "" {
		// The image exists: the by-name lookup succeeds immediately.
		return inOrder(
			storeMock.EXPECT().Image(expectedImageName).
				Return(&cstorage.Image{ID: resolvedImageID, Names: []string{expectedImageName}}, nil),
		)
	}
	// Missing image: the by-name lookup fails, then GetStoreImage falls back
	// to resolveImage.
	return inOrder(
		storeMock.EXPECT().Image(expectedImageName).Return(nil, cstorage.ErrImageUnknown),
		mockResolveImage(storeMock, expectedImageName, ""),
	)
}
// containers/image/storage.storageReference.resolveImage
// expectedImageNameOrID, if a name, must be in the fully normalized format (reference.Named.String())!
// resolvedImageID may be "" to simulate a missing image
func mockResolveImage(storeMock *MockStore, expectedImageNameOrID, resolvedImageID string) mockSequence {
	if resolvedImageID != "" {
		// Lookup succeeds on the first try.
		return inOrder(
			storeMock.EXPECT().Image(expectedImageNameOrID).
				Return(&cstorage.Image{ID: resolvedImageID, Names: []string{expectedImageNameOrID}}, nil),
		)
	}
	return inOrder(
		storeMock.EXPECT().Image(expectedImageNameOrID).Return(nil, cstorage.ErrImageUnknown),
		// Assuming expectedImageNameOrID does not have a digest, so resolveName does not call ImagesByDigest
		mockStorageReferenceStringWithinTransport(storeMock),
		mockStorageReferenceStringWithinTransport(storeMock),
	)
}
// containers/image/storage.storageImageSource.getSize
// Registers the big-data listing plus a single size query that getSize
// performs against the store.
func mockStorageImageSourceGetSize(storeMock *MockStore) mockSequence {
	return inOrder(
		storeMock.EXPECT().ListImageBigData(gomock.Any()).
			Return([]string{""}, nil), // A single entry
		storeMock.EXPECT().ImageBigDataSize(gomock.Any(), gomock.Any()).
			Return(int64(0), nil),
		// FIXME: This should also walk through the layer list and call store.Layer() on each, but we would have to mock the whole layer list.
	)
}
// containers/image/storage.storageReference.newImage
// Registers the resolve, manifest read (served from testManifest), and size
// computation that newImage performs, in that order.
func mockNewImage(storeMock *MockStore, expectedImageName, resolvedImageID string) mockSequence {
	return inOrder(
		mockResolveImage(storeMock, expectedImageName, resolvedImageID),
		storeMock.EXPECT().ImageBigData(gomock.Any(), gomock.Any()).
			Return(testManifest, nil),
		mockStorageImageSourceGetSize(storeMock),
	)
}
package cmdrunner
import (
"context"
"os/exec"
)
// Use a singleton instance because there are many modules that may want access
// and having it all go through a shared object like the config or server would
// add a lot of complexity.
// commandRunner is nil until PrependCommandsWith configures it; the package
// level helpers fall back to os/exec directly while it is nil.
var commandRunner CommandRunner
// CommandRunner is an interface for executing commands.
// It gives the option to change the way commands are run server-wide.
type CommandRunner interface {
	// Command mirrors exec.Command.
	Command(string, ...string) *exec.Cmd
	// CommandContext mirrors exec.CommandContext.
	CommandContext(context.Context, string, ...string) *exec.Cmd
	// CombinedOutput runs the command and returns its combined stdout/stderr.
	CombinedOutput(string, ...string) ([]byte, error)
}
// prependableCommandRunner is an implementation of CommandRunner.
// It gives the option for all commands that are run to be prepended by another command
// and arguments.
type prependableCommandRunner struct {
	// prependCmd is the executable that wraps every command; empty means no wrapping.
	prependCmd string
	// prependArgs are inserted between prependCmd and the real command.
	prependArgs []string
}
// PrependCommandsWith updates the commandRunner singleton to have the configured prepended args and command.
// Any previously configured runner is replaced wholesale.
func PrependCommandsWith(prependCmd string, prependArgs ...string) {
	commandRunner = &prependableCommandRunner{
		prependCmd:  prependCmd,
		prependArgs: prependArgs,
	}
}
// CombinedOutput calls CombinedOutput on the configured commandRunner; when
// none is configured it falls back to the exec package's default behavior.
func CombinedOutput(command string, args ...string) ([]byte, error) {
	if commandRunner != nil {
		return commandRunner.CombinedOutput(command, args...)
	}
	return exec.Command(command, args...).CombinedOutput()
}
// CombinedOutput returns the combined output of the command, given the prepended cmd/args that were defined.
// It delegates to c.Command so the prepend logic lives in one place.
func (c *prependableCommandRunner) CombinedOutput(command string, args ...string) ([]byte, error) {
	return c.Command(command, args...).CombinedOutput()
}
// Command calls Command on the configured commandRunner; when none is
// configured it falls back to the exec package's default behavior.
func Command(cmd string, args ...string) *exec.Cmd {
	if commandRunner != nil {
		return commandRunner.Command(cmd, args...)
	}
	return exec.Command(cmd, args...)
}
// CommandContext calls CommandContext on the configured commandRunner; when
// none is configured it falls back to the exec package's default behavior.
func CommandContext(ctx context.Context, cmd string, args ...string) *exec.Cmd {
	if commandRunner != nil {
		return commandRunner.CommandContext(ctx, cmd, args...)
	}
	return exec.CommandContext(ctx, cmd, args...)
}
// Command creates an exec.Cmd object. If prependCmd is defined, the command will be prependCmd
// and the args will be prependArgs + cmd + args.
// Otherwise, cmd and args will be as inputted.
func (c *prependableCommandRunner) Command(cmd string, args ...string) *exec.Cmd {
	if c.prependCmd == "" {
		return exec.Command(cmd, args...)
	}
	// Build a fresh slice instead of appending to c.prependArgs directly:
	// appending to the shared slice can write into its backing array and
	// corrupt the configured prepend arguments across (concurrent) calls.
	realArgs := make([]string, 0, len(c.prependArgs)+1+len(args))
	realArgs = append(realArgs, c.prependArgs...)
	realArgs = append(realArgs, cmd)
	realArgs = append(realArgs, args...)
	return exec.Command(c.prependCmd, realArgs...)
}
// CommandContext creates an exec.Cmd object. If prependCmd is defined, the command will be prependCmd
// and the args will be prependArgs + cmd + args.
// Otherwise, cmd and args will be as inputted.
func (c *prependableCommandRunner) CommandContext(ctx context.Context, cmd string, args ...string) *exec.Cmd {
	if c.prependCmd == "" {
		return exec.CommandContext(ctx, cmd, args...)
	}
	// Build a fresh slice instead of appending to c.prependArgs directly:
	// appending to the shared slice can write into its backing array and
	// corrupt the configured prepend arguments across (concurrent) calls.
	realArgs := make([]string, 0, len(c.prependArgs)+1+len(args))
	realArgs = append(realArgs, c.prependArgs...)
	realArgs = append(realArgs, cmd)
	realArgs = append(realArgs, args...)
	return exec.CommandContext(ctx, c.prependCmd, realArgs...)
}
// GetPrependedCmd returns the prepended command if one is configured, else the empty string.
func GetPrependedCmd() string {
	// Only a prependableCommandRunner carries a prepend configuration; any
	// other (or nil) runner reports no prepended command.
	if c, ok := commandRunner.(*prependableCommandRunner); ok {
		return c.prependCmd
	}
	return ""
}
// ResetPrependedCmd resets the singleton for more reliable unit testing.
// After the call, the package helpers fall back to plain os/exec.
func ResetPrependedCmd() {
	commandRunner = nil
}
package utils
import (
"os"
"path/filepath"
"syscall"
)
// GetDiskUsageStats accepts a path to a directory or file
// and returns the number of bytes and inodes used by the path.
func GetDiskUsageStats(path string) (dirSize, inodeCount uint64, _ error) {
if err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
// Walk does not follow symbolic links
if err != nil {
return err
}
dirSize += uint64(info.Size())
inodeCount++
return nil
}); err != nil {
return 0, 0, err
}
return dirSize, inodeCount, nil
}
// IsDirectory tests whether the given path exists and is a directory. It
// follows symlinks.
func IsDirectory(path string) error {
info, err := os.Stat(path)
if err != nil {
return err
}
if !info.Mode().IsDir() {
// Return a PathError to be consistent with os.Stat().
return &os.PathError{
Op: "stat",
Path: path,
Err: syscall.ENOTDIR,
}
}
return nil
}
package utils
import (
"crypto/rand"
"encoding/hex"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"runtime/pprof"
"strconv"
"time"
systemdDbus "github.com/coreos/go-systemd/v22/dbus"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/godbus/dbus/v5"
"github.com/moby/sys/user"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
"k8s.io/client-go/tools/remotecommand"
types "k8s.io/cri-api/pkg/apis/runtime/v1"
)
// StatusToExitCode converts wait status code to an exit code by extracting
// bits 8-15, where the shell-style exit code is stored.
func StatusToExitCode(status int) int {
	return (status >> 8) & 0xff
}
// newProp wraps a name/value pair into a systemd D-Bus property.
func newProp(name string, units any) systemdDbus.Property {
	return systemdDbus.Property{
		Name:  name,
		Value: dbus.MakeVariant(units),
	}
}
// DetachError is special error which returned in case of container detach.
type DetachError struct{}

// Error implements the error interface for DetachError.
func (DetachError) Error() string {
	return "detached from container"
}
// CopyDetachable is similar to io.Copy but support a detach key sequence to break out.
// When the complete key sequence is observed it returns (0, DetachError{})
// without forwarding the sequence to dst; otherwise it returns the number of
// bytes written and the first error encountered (EOF is not an error).
func CopyDetachable(dst io.Writer, src io.Reader, keys []byte) (int64, error) {
	var (
		written int64
		err     error
	)
	// Sanity check interfaces
	if dst == nil || src == nil {
		return 0, errors.New("src/dst reader/writer nil")
	}
	if len(keys) == 0 {
		// Default keys : ctrl-p ctrl-q
		keys = []byte{16, 17}
	}
	buf := make([]byte, 32*1024)
	for {
		nr, er := src.Read(buf)
		if nr > 0 {
			// Detect the detach sequence: it only matches when each key
			// arrives as a separate single-byte read (typical for an
			// interactive TTY).
			preserveBuf := []byte{}
			for i, key := range keys {
				// NOTE(review): buf[0:nr] is appended once per inspected key,
				// so a partially matched sequence is replayed to dst; confirm
				// the intended framing for key sequences longer than 2 bytes.
				preserveBuf = append(preserveBuf, buf[0:nr]...)
				if nr != 1 || buf[0] != key {
					break
				}
				if i == len(keys)-1 {
					// Full detach sequence seen: signal detach, write nothing.
					// src.Close()
					return 0, DetachError{}
				}
				nr, er = src.Read(buf)
			}
			nw, ew := dst.Write(preserveBuf)
			nr = len(preserveBuf)
			if nw > 0 {
				written += int64(nw)
			}
			if ew != nil {
				err = ew
				break
			}
			if nr != nw {
				err = io.ErrShortWrite
				break
			}
		}
		if er != nil {
			// EOF is the normal end of stream; anything else is propagated.
			if er != io.EOF {
				err = er
			}
			break
		}
	}
	return written, err
}
// WriteGoroutineStacksToFile write goroutine stacks
// to the specified file.
func WriteGoroutineStacksToFile(path string) error {
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0o666)
if err != nil {
return err
}
defer f.Close()
if err := WriteGoroutineStacksTo(f); err != nil {
return err
}
return f.Sync()
}
// WriteGoroutineStacksToFile write goroutine stacks
// to the specified file.
func WriteGoroutineStacksTo(f io.Writer) error {
// Print goroutines stacks using the same format
// as if an unrecoverable panic would occur. The
// internal buffer is 64 MiB, which hopefully
// will be sufficient.
if err := pprof.Lookup("goroutine").WriteTo(f, 2); err != nil {
return fmt.Errorf("write goroutines: %w", err)
}
return nil
}
// GenerateID generates a random unique id.
func GenerateID() (string, error) {
b := make([]byte, 32)
if _, err := rand.Read(b); err != nil {
return "", fmt.Errorf("generate ID: %w", err)
}
return hex.EncodeToString(b), nil
}
// openContainerFile opens a file inside a container rootfs safely.
// SecureJoin resolves path relative to rootfs so that symlinks inside the
// rootfs cannot escape it.
func openContainerFile(rootfs, path string) (io.ReadCloser, error) {
	fp, err := securejoin.SecureJoin(rootfs, path)
	if err != nil {
		return nil, err
	}
	fh, err := os.Open(fp)
	if err != nil {
		// This is needed because a nil *os.File is different to a nil
		// io.ReadCloser and this causes GetExecUser to not detect that the
		// container file is missing.
		return nil, err
	}
	return fh, nil
}
// GetUserInfo returns UID, GID and additional groups for specified user
// by looking them up in /etc/passwd and /etc/group.
func GetUserInfo(rootfs, userName string) (uid, gid uint32, additionalGids []uint32, _ error) {
	// Missing passwd/group files are tolerated (logged only), since not all
	// images ship them; GetExecUser accepts nil readers.
	var passwdFile, groupFile io.ReadCloser
	if f, err := openContainerFile(rootfs, "/etc/passwd"); err != nil {
		logrus.Warnf("Failed to open /etc/passwd: %v", err)
	} else {
		passwdFile = f
		defer f.Close()
	}
	if f, err := openContainerFile(rootfs, "/etc/group"); err != nil {
		logrus.Warnf("Failed to open /etc/group: %v", err)
	} else {
		groupFile = f
		defer f.Close()
	}
	execUser, err := user.GetExecUser(userName, nil, passwdFile, groupFile)
	if err != nil {
		return 0, 0, nil, fmt.Errorf("get exec user: %w", err)
	}
	gids := make([]uint32, 0, len(execUser.Sgids))
	for _, g := range execUser.Sgids {
		gids = append(gids, uint32(g))
	}
	return uint32(execUser.Uid), uint32(execUser.Gid), gids, nil
}
// GeneratePasswd generates a container specific passwd file,
// iff uid is not defined in the containers /etc/passwd.
// It returns the path of the generated file, or "" when no file is needed.
func GeneratePasswd(username string, uid, gid uint32, homedir, rootfs, rundir string) (string, error) {
	// If the uid already has a passwd entry in the container, nothing to do.
	if _, err := GetUser(rootfs, strconv.Itoa(int(uid))); err == nil {
		return "", nil
	}
	// stat.Size == 0 also covers the file-does-not-exist case, where
	// secureFilePath returns a zero Stat_t and a nil error.
	passwdFilePath, stat, err := secureFilePath(rootfs, "/etc/passwd")
	if err != nil || stat.Size == 0 {
		return "", err
	}
	// Skip modification for group/world-writable files, or when the target
	// uid already owns an owner-writable file (see checkFilePermissions).
	if checkFilePermissions(&stat, uid, stat.Uid) {
		return "", nil
	}
	origContent, err := readFileContent(passwdFilePath)
	if err != nil || origContent == nil {
		return "", err
	}
	if username == "" {
		username = "default"
	}
	if homedir == "" {
		homedir = "/tmp"
	}
	// Append the synthesized entry to a copy of the original passwd content
	// and write it to a container-specific file under rundir, preserving the
	// original file's mode and ownership.
	pwdContent := fmt.Sprintf("%s%s:x:%d:%d:%s user:%s:/sbin/nologin\n", string(origContent), username, uid, gid, username, homedir)
	passwdFile := filepath.Join(rundir, "passwd")
	return createAndSecureFile(passwdFile, pwdContent, os.FileMode(stat.Mode), int(stat.Uid), int(stat.Gid))
}
// GenerateGroup generates a container specific group file,
// iff gid is not defined in the containers /etc/group.
// It returns the path of the generated file, or "" when no file is needed.
func GenerateGroup(gid uint32, rootfs, rundir string) (string, error) {
	// If the gid already has a group entry in the container, nothing to do.
	if _, err := GetGroup(rootfs, strconv.Itoa(int(gid))); err == nil {
		return "", nil
	}
	// NOTE(review): unlike GeneratePasswd there is no stat.Size == 0 guard
	// here; a missing /etc/group is still handled because readFileContent
	// returns (nil, nil) below — confirm the asymmetry is intentional.
	groupFilePath, stat, err := secureFilePath(rootfs, "/etc/group")
	if err != nil {
		return "", err
	}
	// Skip modification for group/world-writable files, or when the target
	// gid already matches an owner-writable file (see checkFilePermissions).
	if checkFilePermissions(&stat, gid, stat.Gid) {
		return "", nil
	}
	origContent, err := readFileContent(groupFilePath)
	if err != nil || origContent == nil {
		return "", err
	}
	// Append the synthesized group entry and write the result under rundir,
	// preserving the original file's mode and ownership.
	groupContent := fmt.Sprintf("%s%d:x:%d:\n", string(origContent), gid, gid)
	groupFile := filepath.Join(rundir, "group")
	return createAndSecureFile(groupFile, groupContent, os.FileMode(stat.Mode), int(stat.Uid), int(stat.Gid))
}
// secureFilePath resolves file inside rootfs (symlink-safe via SecureJoin)
// and stats it. A missing file is not an error: it returns an empty path and
// a zero Stat_t, which callers detect via stat.Size == 0.
func secureFilePath(rootfs, file string) (string, unix.Stat_t, error) {
	path, err := securejoin.SecureJoin(rootfs, file)
	if err != nil {
		return "", unix.Stat_t{}, fmt.Errorf("unable to follow symlinks to %s file: %w", file, err)
	}
	var st unix.Stat_t
	err = unix.Stat(path, &st)
	if err != nil {
		if os.IsNotExist(err) {
			return "", unix.Stat_t{}, nil // File does not exist
		}
		return "", unix.Stat_t{}, fmt.Errorf("unable to stat file %s: %w", path, err)
	}
	return path, st, nil
}
// checkFilePermissions reports whether file modification should be skipped,
// based on the file's mode bits and ownership.
func checkFilePermissions(stat *unix.Stat_t, id, statID uint32) bool {
	// Group- or world-writable files are skipped outright.
	if stat.Mode&0o022 != 0 {
		return true
	}
	// Skip when the requested ID already owns the file and it is owner writable.
	return id == statID && stat.Mode&0o200 != 0
}
func readFileContent(path string) ([]byte, error) {
content, err := os.ReadFile(path)
if err != nil {
if os.IsNotExist(err) {
return nil, nil // File does not exist
}
return nil, fmt.Errorf("read file: %w", err)
}
return content, nil
}
func createAndSecureFile(path, content string, mode os.FileMode, uid, gid int) (string, error) {
if err := os.WriteFile(path, []byte(content), mode&os.ModePerm); err != nil {
return "", fmt.Errorf("failed to create file: %w", err)
}
if err := os.Chown(path, uid, gid); err != nil {
return "", fmt.Errorf("failed to chown file: %w", err)
}
return path, nil
}
// GetGroup searches for a group in the container's /etc/group file using the provided
// container mount path and group identifier (either name or ID). It returns a matching
// user.Group structure if found. If no matching group is located, it returns
// ErrNoGroupEntries (with a Gid-only Group when the input was numeric).
func GetGroup(containerMount, groupIDorName string) (*user.Group, error) {
	gid, convErr := strconv.Atoi(groupIDorName)
	byName := convErr != nil
	groupDest, err := securejoin.SecureJoin(containerMount, "/etc/group")
	if err != nil {
		return nil, err
	}
	matches, err := user.ParseGroupFileFilter(groupDest, func(g user.Group) bool {
		if byName {
			return g.Name == groupIDorName
		}
		return g.Gid == gid
	})
	// A missing /etc/group is treated like an empty one.
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	if len(matches) != 0 {
		return &matches[0], nil
	}
	if byName {
		return nil, user.ErrNoGroupEntries
	}
	// Numeric input: hand back a Gid-only entry alongside the sentinel error.
	return &user.Group{Gid: gid}, user.ErrNoGroupEntries
}
// GetUser takes a containermount path and user name or ID and returns
// a matching User structure from /etc/passwd. If it cannot locate a user
// with the provided information, an ErrNoPasswdEntries is returned.
// When the provided user name was an ID, a User structure with Uid
// set is returned along with ErrNoPasswdEntries.
func GetUser(containerMount, userIDorName string) (*user.User, error) {
	uid, convErr := strconv.Atoi(userIDorName)
	byName := convErr != nil
	passwdDest, err := securejoin.SecureJoin(containerMount, "/etc/passwd")
	if err != nil {
		return nil, err
	}
	matches, err := user.ParsePasswdFileFilter(passwdDest, func(u user.User) bool {
		if byName {
			return u.Name == userIDorName
		}
		return u.Uid == uid
	})
	// A missing /etc/passwd is treated like an empty one.
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	if len(matches) != 0 {
		return &matches[0], nil
	}
	if byName {
		return nil, user.ErrNoPasswdEntries
	}
	// Numeric input: hand back a Uid-only entry alongside the sentinel error.
	return &user.User{Uid: uid}, user.ErrNoPasswdEntries
}
// Int32Ptr is a utility function to assign to integer pointer variables.
func Int32Ptr(i int32) *int32 {
	v := i
	return &v
}
// EnsureSaneLogPath is a hack to fix https://issues.k8s.io/44043 which causes
// logPath to be a broken symlink to some magical Docker path. Ideally we
// wouldn't have to deal with this, but until that issue is fixed we have to
// remove the path if it's a broken symlink.
func EnsureSaneLogPath(logPath string) error {
// If the path exists but the resolved path does not, then we have a broken
// symlink and we need to remove it.
fi, err := os.Lstat(logPath)
if err != nil || fi.Mode()&os.ModeSymlink == 0 {
// Non-existent files and non-symlinks aren't our problem.
return nil
}
_, err = os.Stat(logPath)
if os.IsNotExist(err) {
err = os.RemoveAll(logPath)
if err != nil {
return fmt.Errorf("failed to remove bad log path %s: %w", logPath, err)
}
}
return nil
}
// GetLabelOptions converts a CRI SELinuxOption into the "key:value" label
// slice understood by the SELinux label helpers. Empty fields are skipped;
// a nil input yields an empty (non-nil) slice.
func GetLabelOptions(selinuxOptions *types.SELinuxOption) []string {
	labels := []string{}
	if selinuxOptions == nil {
		return labels
	}
	for _, opt := range []struct{ key, value string }{
		{"user", selinuxOptions.GetUser()},
		{"role", selinuxOptions.GetRole()},
		{"type", selinuxOptions.GetType()},
		{"level", selinuxOptions.GetLevel()},
	} {
		if opt.value != "" {
			labels = append(labels, opt.key+":"+opt.value)
		}
	}
	return labels
}
// SyncParent ensures a path's parent directory is synced to disk.
func SyncParent(path string) error {
	parent := filepath.Dir(path)
	return Sync(parent)
}
// Sync ensures a path is synced to disk.
func Sync(path string) error {
f, err := os.OpenFile(path, os.O_RDONLY, 0o755)
if err != nil {
return err
}
defer f.Close()
if err := f.Sync(); err != nil {
return err
}
return nil
}
// HandleResizing spawns a goroutine that processes the resize channel, calling
// resizeFunc for each TerminalSize received from the channel. The resize
// channel must be closed elsewhere to stop the goroutine.
func HandleResizing(resize <-chan remotecommand.TerminalSize, resizeFunc func(size remotecommand.TerminalSize)) {
	if resize == nil {
		return
	}
	go func() {
		// Range terminates once the channel is closed.
		for size := range resize {
			// Skip degenerate terminal dimensions.
			if size.Height < 1 || size.Width < 1 {
				continue
			}
			resizeFunc(size)
		}
	}()
}
// ParseDuration parses a string that can contain either a human-readable duration
// notation such as "24h" or "5m30s", so a duration with unit, or a string-encoded
// integer value that denotes the number of seconds and returns a corresponding
// `time.Duration` type. Parsing a floating point value encoded as string without
// a duration unit is not supported.
//
// An assumption is made that the duration value cannot be negative, and as such,
// any negative value will be converted to a positive duration automatically.
func ParseDuration(s string) (time.Duration, error) {
var t time.Duration
n, err := strconv.ParseInt(s, 10, 64)
if err == nil {
t = time.Duration(n) * time.Second
} else {
t, err = time.ParseDuration(s)
}
if err != nil {
return 0, err
}
// Assume that time does not move backwards.
if t < 0 {
t = -t
}
return t, nil
}
package utils
import (
fuzz "github.com/AdaLogics/go-fuzz-headers"
"os"
)
// FuzzGeneratePasswd is a fuzzing entrypoint: it populates a scratch etc
// directory with fuzz-generated files, resolves the "root" user from it, and
// feeds a fuzz-generated user name into GeneratePasswd. Returns 1 when the
// fuzz target ran to completion, 0 on early bail-out.
func FuzzGeneratePasswd(data []byte) int {
	const etcDir = "/tmp/etcDir"
	if err := os.MkdirAll(etcDir, 0o777); err != nil {
		return 0
	}
	defer os.RemoveAll(etcDir)

	consumer := fuzz.NewConsumer(data)
	if err := consumer.CreateFiles(etcDir); err != nil {
		return 0
	}

	uid, gid, _, err := GetUserInfo(etcDir, "root")
	if err != nil {
		return 0
	}

	username, err := consumer.GetString()
	if err != nil {
		return 0
	}

	// Only crash behavior matters for fuzzing; the result is discarded.
	_, _ = GeneratePasswd(username, uid, gid, "", etcDir, etcDir)

	return 1
}
package utils
import (
"context"
"errors"
"fmt"
"os"
"time"
systemdDbus "github.com/coreos/go-systemd/v22/dbus"
"golang.org/x/sys/unix"
"github.com/cri-o/cri-o/internal/dbusmgr"
)
// RunUnderSystemdScope adds the specified pid to a systemd scope.
//
// A transient unit named unitName is created over dbus (optionally placed in
// the given slice), and the call blocks until systemd reports the job result
// or a generous timeout expires. Any extra properties are forwarded to
// systemd in addition to the defaults below.
func RunUnderSystemdScope(mgr *dbusmgr.DbusConnManager, pid int, slice, unitName string, properties ...systemdDbus.Property) (err error) {
	ctx := context.Background()
	// sanity check
	if mgr == nil {
		return errors.New("dbus manager is nil")
	}
	// Defaults: attach the PID, delegate cgroup management to the unit, and
	// drop systemd's implicit default dependencies.
	defaultProperties := []systemdDbus.Property{
		newProp("PIDs", []uint32{uint32(pid)}),
		newProp("Delegate", true),
		newProp("DefaultDependencies", false),
	}
	// Caller-supplied properties are appended after the defaults.
	properties = append(defaultProperties, properties...)
	if slice != "" {
		properties = append(properties, systemdDbus.PropSlice(slice))
	}
	// Make a buffered channel so that the sender (go-systemd's jobComplete)
	// won't be blocked on channel send while holding the jobListener lock
	// (RHBZ#2082344).
	ch := make(chan string, 1)
	// Retry the transient unit creation if the dbus connection was dropped.
	// Note the closure assigns the function's named return err, while the
	// if-scoped err holds RetryOnDisconnect's own result.
	if err := mgr.RetryOnDisconnect(func(c *systemdDbus.Conn) error {
		_, err = c.StartTransientUnitContext(ctx, unitName, "replace", properties, ch)
		return err
	}); err != nil {
		return fmt.Errorf("start transient unit %q: %w", unitName, err)
	}
	// Wait for the job status.
	select {
	case s := <-ch:
		// NOTE(review): assumes go-systemd delivers exactly one result string
		// per job, making the close here safe — confirm against go-systemd docs.
		close(ch)
		if s != "done" {
			return fmt.Errorf("error moving conmon with pid %d to systemd unit %s: got %s", pid, unitName, s)
		}
	case <-time.After(time.Minute * 6):
		// This case is a work around to catch situations where the dbus library sends the
		// request but it unexpectedly disappears. We set the timeout very high to make sure
		// we wait as long as possible to catch situations where dbus is overwhelmed.
		// We also don't use the native context cancelling behavior of the dbus library,
		// because experience has shown that it does not help.
		// TODO: Find cause of the request being dropped in the dbus library and fix it.
		return fmt.Errorf("timed out moving conmon with pid %d to systemd unit %s", pid, unitName)
	}
	return nil
}
// Syncfs ensures the file system at path is synced to disk by opening the
// path read-only and issuing a syncfs(2) on its descriptor.
func Syncfs(path string) error {
	f, err := os.OpenFile(path, os.O_RDONLY, 0o755)
	if err != nil {
		return err
	}
	defer f.Close()
	return unix.Syncfs(int(f.Fd()))
}