// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
)
// Address IP address
//
// swagger:model Address
type Address string
// Validate validates this address
func (m Address) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this address based on context it is used
func (m Address) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// AddressPair Addressing information of an endpoint
//
// swagger:model AddressPair
type AddressPair struct {
// IPv4 address
IPV4 string `json:"ipv4,omitempty"`
// UUID of IPv4 expiration timer
IPV4ExpirationUUID string `json:"ipv4-expiration-uuid,omitempty"`
// IPAM pool from which this IPv4 address was allocated
IPV4PoolName string `json:"ipv4-pool-name,omitempty"`
// IPv6 address
IPV6 string `json:"ipv6,omitempty"`
// UUID of IPv6 expiration timer
IPV6ExpirationUUID string `json:"ipv6-expiration-uuid,omitempty"`
// IPAM pool from which this IPv6 address was allocated
IPV6PoolName string `json:"ipv6-pool-name,omitempty"`
}
// Validate validates this address pair
func (m *AddressPair) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this address pair based on context it is used
func (m *AddressPair) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *AddressPair) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *AddressPair) UnmarshalBinary(b []byte) error {
var res AddressPair
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
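// Illustrative usage sketch (not part of the generated code): round-tripping an
// AddressPair through its MarshalBinary/UnmarshalBinary helpers, which simply
// wrap JSON encoding via swag. The helper name exampleRoundTripAddressPair and
// the addresses below are hypothetical; the function assumes it lives in the
// same models package.
func exampleRoundTripAddressPair() (*AddressPair, error) {
	in := &AddressPair{
		IPV4: "10.0.0.1",
		IPV6: "f00d::1",
	}
	raw, err := in.MarshalBinary() // JSON-encodes the struct
	if err != nil {
		return nil, err
	}
	out := &AddressPair{}
	if err := out.UnmarshalBinary(raw); err != nil { // decodes back into a fresh value
		return nil, err
	}
	return out, nil
}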
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
)
// AllocationMap Map of allocated IPs
//
// swagger:model AllocationMap
type AllocationMap map[string]string
// Validate validates this allocation map
func (m AllocationMap) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this allocation map based on context it is used
func (m AllocationMap) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
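// Illustrative usage sketch (not part of the generated code): AllocationMap is a
// plain map from allocated IP to owner, so it can be built with a map literal and
// validated like any other model. The owner strings below are hypothetical.
func exampleAllocationMap() error {
	allocations := AllocationMap{
		"10.0.0.1": "default/pod-a",
		"10.0.0.2": "default/pod-b",
	}
	return allocations.Validate(strfmt.Default) // no constraints, so this always returns nil
}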
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/validate"
)
// AttachMode Core datapath attachment mode
//
// swagger:model AttachMode
type AttachMode string
func NewAttachMode(value AttachMode) *AttachMode {
return &value
}
// Pointer returns a pointer to a freshly-allocated AttachMode.
func (m AttachMode) Pointer() *AttachMode {
return &m
}
const (
// AttachModeTc captures enum value "tc"
AttachModeTc AttachMode = "tc"
// AttachModeTcx captures enum value "tcx"
AttachModeTcx AttachMode = "tcx"
)
// for schema
var attachModeEnum []interface{}
func init() {
var res []AttachMode
if err := json.Unmarshal([]byte(`["tc","tcx"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
attachModeEnum = append(attachModeEnum, v)
}
}
func (m AttachMode) validateAttachModeEnum(path, location string, value AttachMode) error {
if err := validate.EnumCase(path, location, value, attachModeEnum, true); err != nil {
return err
}
return nil
}
// Validate validates this attach mode
func (m AttachMode) Validate(formats strfmt.Registry) error {
var res []error
// value enum
if err := m.validateAttachModeEnum("", "body", m); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validates this attach mode based on context it is used
func (m AttachMode) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
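// Illustrative usage sketch (not part of the generated code): AttachMode values
// are validated against the "tc"/"tcx" enum, so building one from the generated
// constants keeps Validate happy. exampleValidateAttachMode is a hypothetical
// helper name.
func exampleValidateAttachMode() error {
	mode := NewAttachMode(AttachModeTcx)
	// A value outside the enum, e.g. AttachMode("xdp"), would fail the EnumCase check.
	return mode.Validate(strfmt.Default)
}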
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BPFMap BPF map definition and content
//
// swagger:model BPFMap
type BPFMap struct {
// Contents of cache
Cache []*BPFMapEntry `json:"cache"`
// Path to BPF map
Path string `json:"path,omitempty"`
}
// Validate validates this b p f map
func (m *BPFMap) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateCache(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BPFMap) validateCache(formats strfmt.Registry) error {
if swag.IsZero(m.Cache) { // not required
return nil
}
for i := 0; i < len(m.Cache); i++ {
if swag.IsZero(m.Cache[i]) { // not required
continue
}
if m.Cache[i] != nil {
if err := m.Cache[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cache" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cache" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this b p f map based on the context it is used
func (m *BPFMap) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateCache(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BPFMap) contextValidateCache(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Cache); i++ {
if m.Cache[i] != nil {
if swag.IsZero(m.Cache[i]) { // not required
return nil
}
if err := m.Cache[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cache" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cache" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *BPFMap) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BPFMap) UnmarshalBinary(b []byte) error {
var res BPFMap
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
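// Illustrative usage sketch (not part of the generated code): Validate on BPFMap
// walks the cache slice and validates each entry, prefixing any error with the
// element index (for example "cache.0"). The pin path below is hypothetical.
func exampleValidateBPFMap() error {
	m := &BPFMap{
		Path: "/sys/fs/bpf/tc/globals/example_map",
		Cache: []*BPFMapEntry{
			{DesiredAction: BPFMapEntryDesiredActionOk, Key: "key0", Value: "value0"},
		},
	}
	return m.Validate(strfmt.Default)
}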
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// BPFMapEntry BPF map cache entry
//
// swagger:model BPFMapEntry
type BPFMapEntry struct {
// Desired action to be performed
// Enum: ["ok","insert","delete"]
DesiredAction string `json:"desired-action,omitempty"`
// Key of map entry
Key string `json:"key,omitempty"`
// Last error seen while performing desired action
LastError string `json:"last-error,omitempty"`
// Value of map entry
Value string `json:"value,omitempty"`
}
// Validate validates this b p f map entry
func (m *BPFMapEntry) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateDesiredAction(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var bPFMapEntryTypeDesiredActionPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["ok","insert","delete"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
bPFMapEntryTypeDesiredActionPropEnum = append(bPFMapEntryTypeDesiredActionPropEnum, v)
}
}
const (
// BPFMapEntryDesiredActionOk captures enum value "ok"
BPFMapEntryDesiredActionOk string = "ok"
// BPFMapEntryDesiredActionInsert captures enum value "insert"
BPFMapEntryDesiredActionInsert string = "insert"
// BPFMapEntryDesiredActionDelete captures enum value "delete"
BPFMapEntryDesiredActionDelete string = "delete"
)
// prop value enum
func (m *BPFMapEntry) validateDesiredActionEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, bPFMapEntryTypeDesiredActionPropEnum, true); err != nil {
return err
}
return nil
}
func (m *BPFMapEntry) validateDesiredAction(formats strfmt.Registry) error {
if swag.IsZero(m.DesiredAction) { // not required
return nil
}
// value enum
if err := m.validateDesiredActionEnum("desired-action", "body", m.DesiredAction); err != nil {
return err
}
return nil
}
// ContextValidate validates this b p f map entry based on context it is used
func (m *BPFMapEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *BPFMapEntry) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BPFMapEntry) UnmarshalBinary(b []byte) error {
var res BPFMapEntry
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
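// Illustrative usage sketch (not part of the generated code): the desired-action
// field is optional, but when set it must be one of the generated enum constants
// ("ok", "insert", "delete"). exampleValidateBPFMapEntry and the key/value
// strings are hypothetical.
func exampleValidateBPFMapEntry() error {
	entry := &BPFMapEntry{
		DesiredAction: BPFMapEntryDesiredActionInsert,
		Key:           "10.0.0.1:80",
		Value:         "backend 42",
	}
	return entry.Validate(strfmt.Default)
}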
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BPFMapList List of BPF Maps
//
// swagger:model BPFMapList
type BPFMapList struct {
// Array of open BPF map lists
Maps []*BPFMap `json:"maps"`
}
// Validate validates this b p f map list
func (m *BPFMapList) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateMaps(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BPFMapList) validateMaps(formats strfmt.Registry) error {
if swag.IsZero(m.Maps) { // not required
return nil
}
for i := 0; i < len(m.Maps); i++ {
if swag.IsZero(m.Maps[i]) { // not required
continue
}
if m.Maps[i] != nil {
if err := m.Maps[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("maps" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("maps" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this b p f map list based on the context it is used
func (m *BPFMapList) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateMaps(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BPFMapList) contextValidateMaps(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Maps); i++ {
if m.Maps[i] != nil {
if swag.IsZero(m.Maps[i]) { // not required
return nil
}
if err := m.Maps[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("maps" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("maps" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *BPFMapList) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BPFMapList) UnmarshalBinary(b []byte) error {
var res BPFMapList
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BPFMapProperties BPF map properties
//
// swagger:model BPFMapProperties
type BPFMapProperties struct {
// Name of the BPF map
Name string `json:"name,omitempty"`
// Size of the BPF map
Size int64 `json:"size,omitempty"`
}
// Validate validates this b p f map properties
func (m *BPFMapProperties) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this b p f map properties based on context it is used
func (m *BPFMapProperties) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *BPFMapProperties) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BPFMapProperties) UnmarshalBinary(b []byte) error {
var res BPFMapProperties
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BPFMapStatus BPF map status
//
// +k8s:deepcopy-gen=true
//
// swagger:model BPFMapStatus
type BPFMapStatus struct {
// Ratio of total system memory to use for dynamic sizing of BPF maps
DynamicSizeRatio float64 `json:"dynamic-size-ratio,omitempty"`
// BPF maps
Maps []*BPFMapProperties `json:"maps"`
}
// Validate validates this b p f map status
func (m *BPFMapStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateMaps(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BPFMapStatus) validateMaps(formats strfmt.Registry) error {
if swag.IsZero(m.Maps) { // not required
return nil
}
for i := 0; i < len(m.Maps); i++ {
if swag.IsZero(m.Maps[i]) { // not required
continue
}
if m.Maps[i] != nil {
if err := m.Maps[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("maps" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("maps" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this b p f map status based on the context it is used
func (m *BPFMapStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateMaps(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BPFMapStatus) contextValidateMaps(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Maps); i++ {
if m.Maps[i] != nil {
if swag.IsZero(m.Maps[i]) { // not required
return nil
}
if err := m.Maps[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("maps" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("maps" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *BPFMapStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BPFMapStatus) UnmarshalBinary(b []byte) error {
var res BPFMapStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
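// Illustrative usage sketch (not part of the generated code): BPFMapStatus pairs
// the dynamic sizing ratio with per-map properties, and nested BPFMapProperties
// entries are validated per index. The map name and sizes below are illustrative only.
func exampleBPFMapStatus() error {
	status := &BPFMapStatus{
		DynamicSizeRatio: 0.0025,
		Maps: []*BPFMapProperties{
			{Name: "example_ct_map", Size: 524288},
		},
	}
	return status.Validate(strfmt.Default)
}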
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// BackendAddress Service backend address
//
// swagger:model BackendAddress
type BackendAddress struct {
// Layer 3 address
// Required: true
IP *string `json:"ip"`
// Optional name of the node on which this backend runs
NodeName string `json:"nodeName,omitempty"`
// Layer 4 port number
Port uint16 `json:"port,omitempty"`
// Indicator if this backend is preferred in the context of clustermesh service affinity. The value is set based
// on related annotation of global service. Applicable for active state only.
Preferred bool `json:"preferred,omitempty"`
// Layer 4 protocol (TCP, UDP, etc)
Protocol string `json:"protocol,omitempty"`
// State of the backend for load-balancing service traffic
// Enum: ["active","terminating","quarantined","maintenance"]
State string `json:"state,omitempty"`
// Backend weight
Weight *uint16 `json:"weight,omitempty"`
// Optional name of the zone in which this backend runs
Zone string `json:"zone,omitempty"`
}
// Validate validates this backend address
func (m *BackendAddress) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateIP(formats); err != nil {
res = append(res, err)
}
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BackendAddress) validateIP(formats strfmt.Registry) error {
if err := validate.Required("ip", "body", m.IP); err != nil {
return err
}
return nil
}
var backendAddressTypeStatePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["active","terminating","quarantined","maintenance"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
backendAddressTypeStatePropEnum = append(backendAddressTypeStatePropEnum, v)
}
}
const (
// BackendAddressStateActive captures enum value "active"
BackendAddressStateActive string = "active"
// BackendAddressStateTerminating captures enum value "terminating"
BackendAddressStateTerminating string = "terminating"
// BackendAddressStateQuarantined captures enum value "quarantined"
BackendAddressStateQuarantined string = "quarantined"
// BackendAddressStateMaintenance captures enum value "maintenance"
BackendAddressStateMaintenance string = "maintenance"
)
// prop value enum
func (m *BackendAddress) validateStateEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, backendAddressTypeStatePropEnum, true); err != nil {
return err
}
return nil
}
func (m *BackendAddress) validateState(formats strfmt.Registry) error {
if swag.IsZero(m.State) { // not required
return nil
}
// value enum
if err := m.validateStateEnum("state", "body", m.State); err != nil {
return err
}
return nil
}
// ContextValidate validates this backend address based on context it is used
func (m *BackendAddress) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *BackendAddress) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BackendAddress) UnmarshalBinary(b []byte) error {
var res BackendAddress
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
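// Illustrative usage sketch (not part of the generated code): the "ip" field is
// required and therefore modelled as *string, so it is typically populated via
// swag.String; leaving it nil makes Validate return a "required" error for "ip".
// exampleValidateBackendAddress and the concrete values are hypothetical.
func exampleValidateBackendAddress() error {
	backend := &BackendAddress{
		IP:       swag.String("10.0.0.42"),
		Port:     8080,
		Protocol: "TCP",
		State:    BackendAddressStateActive, // must be an enum value when set
	}
	return backend.Validate(strfmt.Default)
}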
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// BandwidthManager Status of bandwidth manager
//
// +k8s:deepcopy-gen=true
//
// swagger:model BandwidthManager
type BandwidthManager struct {
// congestion control
// Enum: ["cubic","bbr"]
CongestionControl string `json:"congestionControl,omitempty"`
// devices
Devices []string `json:"devices"`
// Is bandwidth manager enabled
Enabled bool `json:"enabled,omitempty"`
}
// Validate validates this bandwidth manager
func (m *BandwidthManager) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateCongestionControl(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var bandwidthManagerTypeCongestionControlPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["cubic","bbr"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
bandwidthManagerTypeCongestionControlPropEnum = append(bandwidthManagerTypeCongestionControlPropEnum, v)
}
}
const (
// BandwidthManagerCongestionControlCubic captures enum value "cubic"
BandwidthManagerCongestionControlCubic string = "cubic"
// BandwidthManagerCongestionControlBbr captures enum value "bbr"
BandwidthManagerCongestionControlBbr string = "bbr"
)
// prop value enum
func (m *BandwidthManager) validateCongestionControlEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, bandwidthManagerTypeCongestionControlPropEnum, true); err != nil {
return err
}
return nil
}
func (m *BandwidthManager) validateCongestionControl(formats strfmt.Registry) error {
if swag.IsZero(m.CongestionControl) { // not required
return nil
}
// value enum
if err := m.validateCongestionControlEnum("congestionControl", "body", m.CongestionControl); err != nil {
return err
}
return nil
}
// ContextValidate validates this bandwidth manager based on context it is used
func (m *BandwidthManager) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *BandwidthManager) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BandwidthManager) UnmarshalBinary(b []byte) error {
var res BandwidthManager
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
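// Illustrative usage sketch (not part of the generated code): congestionControl
// is restricted to the "cubic"/"bbr" enum, so the generated constants are the
// safe way to populate it. exampleBandwidthManager and the device name are hypothetical.
func exampleBandwidthManager() error {
	bwm := &BandwidthManager{
		Enabled:           true,
		CongestionControl: BandwidthManagerCongestionControlBbr,
		Devices:           []string{"eth0"},
	}
	return bwm.Validate(strfmt.Default)
}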
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpFamily Address Family Indicator (AFI) and Subsequent Address Family Indicator (SAFI) of the path
//
// swagger:model BgpFamily
type BgpFamily struct {
// Address Family Indicator (AFI) of the path
Afi string `json:"afi,omitempty"`
// Subsequent Address Family Indicator (SAFI) of the path
Safi string `json:"safi,omitempty"`
}
// Validate validates this bgp family
func (m *BgpFamily) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this bgp family based on context it is used
func (m *BgpFamily) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *BgpFamily) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpFamily) UnmarshalBinary(b []byte) error {
var res BgpFamily
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpGracefulRestart BGP graceful restart parameters negotiated with the peer.
//
// swagger:model BgpGracefulRestart
type BgpGracefulRestart struct {
// When set, graceful restart capability is negotiated for all AFI/SAFIs of
// this peer.
Enabled bool `json:"enabled,omitempty"`
// This is the time advertised to peer for the BGP session to be re-established
// after a restart. After this period, peer will remove stale routes.
// (RFC 4724 section 4.2)
RestartTimeSeconds int64 `json:"restart-time-seconds,omitempty"`
}
// Validate validates this bgp graceful restart
func (m *BgpGracefulRestart) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this bgp graceful restart based on context it is used
func (m *BgpGracefulRestart) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *BgpGracefulRestart) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpGracefulRestart) UnmarshalBinary(b []byte) error {
var res BgpGracefulRestart
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpNlri Network Layer Reachability Information (NLRI) of the path
//
// swagger:model BgpNlri
type BgpNlri struct {
// Base64-encoded NLRI in the BGP UPDATE message format
Base64 string `json:"base64,omitempty"`
}
// Validate validates this bgp nlri
func (m *BgpNlri) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this bgp nlri based on context it is used
func (m *BgpNlri) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *BgpNlri) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpNlri) UnmarshalBinary(b []byte) error {
var res BgpNlri
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpPath Single BGP routing Path containing BGP Network Layer Reachability Information (NLRI) and path attributes
//
// swagger:model BgpPath
type BgpPath struct {
// Age of the path (time since its creation) in nanoseconds
AgeNanoseconds int64 `json:"age-nanoseconds,omitempty"`
// True value flags the best path towards the destination prefix
Best bool `json:"best,omitempty"`
// Address Family Indicator (AFI) and Subsequent Address Family Indicator (SAFI) of the path
Family *BgpFamily `json:"family,omitempty"`
// Network Layer Reachability Information of the path
Nlri *BgpNlri `json:"nlri,omitempty"`
// List of BGP path attributes specific for the path
PathAttributes []*BgpPathAttribute `json:"path-attributes"`
// True value marks the path as stale
Stale bool `json:"stale,omitempty"`
}
// Validate validates this bgp path
func (m *BgpPath) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateFamily(formats); err != nil {
res = append(res, err)
}
if err := m.validateNlri(formats); err != nil {
res = append(res, err)
}
if err := m.validatePathAttributes(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BgpPath) validateFamily(formats strfmt.Registry) error {
if swag.IsZero(m.Family) { // not required
return nil
}
if m.Family != nil {
if err := m.Family.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("family")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("family")
}
return err
}
}
return nil
}
func (m *BgpPath) validateNlri(formats strfmt.Registry) error {
if swag.IsZero(m.Nlri) { // not required
return nil
}
if m.Nlri != nil {
if err := m.Nlri.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nlri")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nlri")
}
return err
}
}
return nil
}
func (m *BgpPath) validatePathAttributes(formats strfmt.Registry) error {
if swag.IsZero(m.PathAttributes) { // not required
return nil
}
for i := 0; i < len(m.PathAttributes); i++ {
if swag.IsZero(m.PathAttributes[i]) { // not required
continue
}
if m.PathAttributes[i] != nil {
if err := m.PathAttributes[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("path-attributes" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("path-attributes" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this bgp path based on the context it is used
func (m *BgpPath) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateFamily(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNlri(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidatePathAttributes(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BgpPath) contextValidateFamily(ctx context.Context, formats strfmt.Registry) error {
if m.Family != nil {
if swag.IsZero(m.Family) { // not required
return nil
}
if err := m.Family.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("family")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("family")
}
return err
}
}
return nil
}
func (m *BgpPath) contextValidateNlri(ctx context.Context, formats strfmt.Registry) error {
if m.Nlri != nil {
if swag.IsZero(m.Nlri) { // not required
return nil
}
if err := m.Nlri.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nlri")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nlri")
}
return err
}
}
return nil
}
func (m *BgpPath) contextValidatePathAttributes(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.PathAttributes); i++ {
if m.PathAttributes[i] != nil {
if swag.IsZero(m.PathAttributes[i]) { // not required
return nil
}
if err := m.PathAttributes[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("path-attributes" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("path-attributes" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *BgpPath) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpPath) UnmarshalBinary(b []byte) error {
var res BgpPath
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpPathAttribute Single BGP path attribute specific for the path
//
// swagger:model BgpPathAttribute
type BgpPathAttribute struct {
// Base64-encoded BGP path attribute in the BGP UPDATE message format
Base64 string `json:"base64,omitempty"`
}
// Validate validates this bgp path attribute
func (m *BgpPathAttribute) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this bgp path attribute based on context it is used
func (m *BgpPathAttribute) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *BgpPathAttribute) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpPathAttribute) UnmarshalBinary(b []byte) error {
var res BgpPathAttribute
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// BgpPeer State of a BGP Peer
//
// +k8s:deepcopy-gen=true
//
// swagger:model BgpPeer
type BgpPeer struct {
// Applied initial value for the BGP HoldTimer (RFC 4271, Section 4.2) in seconds.
// The applied value holds the value that is in effect on the current BGP session.
//
AppliedHoldTimeSeconds int64 `json:"applied-hold-time-seconds,omitempty"`
// Applied initial value for the BGP KeepaliveTimer (RFC 4271, Section 8) in seconds.
// The applied value holds the value that is in effect on the current BGP session.
//
AppliedKeepAliveTimeSeconds int64 `json:"applied-keep-alive-time-seconds,omitempty"`
// Configured initial value for the BGP HoldTimer (RFC 4271, Section 4.2) in seconds.
// The configured value will be used for negotiation with the peer during the BGP session establishment.
//
ConfiguredHoldTimeSeconds int64 `json:"configured-hold-time-seconds,omitempty"`
// Configured initial value for the BGP KeepaliveTimer (RFC 4271, Section 8) in seconds.
// The applied value may be different than the configured value, as it depends on the negotiated hold time interval.
//
ConfiguredKeepAliveTimeSeconds int64 `json:"configured-keep-alive-time-seconds,omitempty"`
// Initial value for the BGP ConnectRetryTimer (RFC 4271, Section 8) in seconds
ConnectRetryTimeSeconds int64 `json:"connect-retry-time-seconds,omitempty"`
// Time To Live (TTL) value used in BGP packets sent to the eBGP neighbor.
// 1 implies that eBGP multi-hop feature is disabled (only a single hop is allowed).
//
EbgpMultihopTTL int64 `json:"ebgp-multihop-ttl,omitempty"`
// BGP peer address family state
Families []*BgpPeerFamilies `json:"families"`
// Graceful restart capability
GracefulRestart *BgpGracefulRestart `json:"graceful-restart,omitempty"`
// Local AS Number
LocalAsn int64 `json:"local-asn,omitempty"`
// IP Address of peer
PeerAddress string `json:"peer-address,omitempty"`
// Peer AS Number
PeerAsn int64 `json:"peer-asn,omitempty"`
// TCP port number of peer
// Maximum: 65535
// Minimum: 1
PeerPort int64 `json:"peer-port,omitempty"`
// BGP peer operational state as described here
// https://www.rfc-editor.org/rfc/rfc4271#section-8.2.2
//
SessionState string `json:"session-state,omitempty"`
// Set when a TCP password is configured for communications with this peer
TCPPasswordEnabled bool `json:"tcp-password-enabled,omitempty"`
// BGP peer connection uptime in nanoseconds.
UptimeNanoseconds int64 `json:"uptime-nanoseconds,omitempty"`
}
// Validate validates this bgp peer
func (m *BgpPeer) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateFamilies(formats); err != nil {
res = append(res, err)
}
if err := m.validateGracefulRestart(formats); err != nil {
res = append(res, err)
}
if err := m.validatePeerPort(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BgpPeer) validateFamilies(formats strfmt.Registry) error {
if swag.IsZero(m.Families) { // not required
return nil
}
for i := 0; i < len(m.Families); i++ {
if swag.IsZero(m.Families[i]) { // not required
continue
}
if m.Families[i] != nil {
if err := m.Families[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("families" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("families" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *BgpPeer) validateGracefulRestart(formats strfmt.Registry) error {
if swag.IsZero(m.GracefulRestart) { // not required
return nil
}
if m.GracefulRestart != nil {
if err := m.GracefulRestart.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("graceful-restart")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("graceful-restart")
}
return err
}
}
return nil
}
func (m *BgpPeer) validatePeerPort(formats strfmt.Registry) error {
if swag.IsZero(m.PeerPort) { // not required
return nil
}
if err := validate.MinimumInt("peer-port", "body", m.PeerPort, 1, false); err != nil {
return err
}
if err := validate.MaximumInt("peer-port", "body", m.PeerPort, 65535, false); err != nil {
return err
}
return nil
}
// ContextValidate validate this bgp peer based on the context it is used
func (m *BgpPeer) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateFamilies(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateGracefulRestart(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BgpPeer) contextValidateFamilies(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Families); i++ {
if m.Families[i] != nil {
if swag.IsZero(m.Families[i]) { // not required
return nil
}
if err := m.Families[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("families" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("families" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *BgpPeer) contextValidateGracefulRestart(ctx context.Context, formats strfmt.Registry) error {
if m.GracefulRestart != nil {
if swag.IsZero(m.GracefulRestart) { // not required
return nil
}
if err := m.GracefulRestart.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("graceful-restart")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("graceful-restart")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *BgpPeer) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpPeer) UnmarshalBinary(b []byte) error {
var res BgpPeer
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
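// Illustrative usage sketch (not part of the generated code): peer-port carries a
// 1..65535 range constraint, and the nested families and graceful-restart models
// are validated recursively. All concrete values below are hypothetical.
func exampleValidateBgpPeer() error {
	peer := &BgpPeer{
		LocalAsn:    65001,
		PeerAsn:     65002,
		PeerAddress: "fd00::1",
		PeerPort:    179, // 0 or 70000 would fail the minimum/maximum checks
		Families: []*BgpPeerFamilies{
			{Afi: "ipv4", Safi: "unicast", Advertised: 3, Received: 5, Accepted: 5},
		},
		GracefulRestart: &BgpGracefulRestart{Enabled: true, RestartTimeSeconds: 120},
	}
	return peer.Validate(strfmt.Default)
}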
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpPeerFamilies BGP AFI SAFI state of the peer
//
// swagger:model BgpPeerFamilies
type BgpPeerFamilies struct {
// Number of routes accepted from the peer of this address family
Accepted int64 `json:"accepted,omitempty"`
// Number of routes advertised of this address family to the peer
Advertised int64 `json:"advertised,omitempty"`
// BGP address family indicator
Afi string `json:"afi,omitempty"`
// Number of routes received from the peer of this address family
Received int64 `json:"received,omitempty"`
// BGP subsequent address family indicator
Safi string `json:"safi,omitempty"`
}
// Validate validates this bgp peer families
func (m *BgpPeerFamilies) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this bgp peer families based on context it is used
func (m *BgpPeerFamilies) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *BgpPeerFamilies) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpPeerFamilies) UnmarshalBinary(b []byte) error {
var res BgpPeerFamilies
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpRoute Single BGP route retrieved from the RIB of underlying router
//
// swagger:model BgpRoute
type BgpRoute struct {
// IP address specifying a BGP neighbor if the source table type is adj-rib-in or adj-rib-out
Neighbor string `json:"neighbor,omitempty"`
// List of routing paths leading towards the prefix
Paths []*BgpPath `json:"paths"`
// IP prefix of the route
Prefix string `json:"prefix,omitempty"`
// Autonomous System Number (ASN) identifying a BGP virtual router instance
RouterAsn int64 `json:"router-asn,omitempty"`
}
// Validate validates this bgp route
func (m *BgpRoute) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validatePaths(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BgpRoute) validatePaths(formats strfmt.Registry) error {
if swag.IsZero(m.Paths) { // not required
return nil
}
for i := 0; i < len(m.Paths); i++ {
if swag.IsZero(m.Paths[i]) { // not required
continue
}
if m.Paths[i] != nil {
if err := m.Paths[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("paths" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("paths" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this bgp route based on the context it is used
func (m *BgpRoute) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidatePaths(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BgpRoute) contextValidatePaths(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Paths); i++ {
if m.Paths[i] != nil {
if swag.IsZero(m.Paths[i]) { // not required
return nil
}
if err := m.Paths[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("paths" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("paths" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *BgpRoute) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpRoute) UnmarshalBinary(b []byte) error {
var res BgpRoute
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
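// Illustrative usage sketch (not part of the generated code): a BgpRoute groups
// the paths known for one prefix; nested BgpPath values (and their family/NLRI)
// are validated with index-prefixed names such as "paths.0". Values are hypothetical.
func exampleBgpRoute() error {
	route := &BgpRoute{
		RouterAsn: 65001,
		Prefix:    "10.0.0.0/24",
		Paths: []*BgpPath{
			{
				Best:   true,
				Family: &BgpFamily{Afi: "ipv4", Safi: "unicast"},
			},
		},
	}
	return route.Validate(strfmt.Default)
}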
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// BgpRoutePolicy Single BGP route policy retrieved from the underlying router
//
// swagger:model BgpRoutePolicy
type BgpRoutePolicy struct {
// Name of the route policy
Name string `json:"name,omitempty"`
// Autonomous System Number (ASN) identifying a BGP virtual router instance
RouterAsn int64 `json:"router-asn,omitempty"`
// List of the route policy statements
Statements []*BgpRoutePolicyStatement `json:"statements"`
// Type of the route policy
// Enum: ["export","import"]
Type string `json:"type,omitempty"`
}
// Validate validates this bgp route policy
func (m *BgpRoutePolicy) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateStatements(formats); err != nil {
res = append(res, err)
}
if err := m.validateType(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BgpRoutePolicy) validateStatements(formats strfmt.Registry) error {
if swag.IsZero(m.Statements) { // not required
return nil
}
for i := 0; i < len(m.Statements); i++ {
if swag.IsZero(m.Statements[i]) { // not required
continue
}
if m.Statements[i] != nil {
if err := m.Statements[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("statements" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("statements" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
var bgpRoutePolicyTypeTypePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["export","import"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
bgpRoutePolicyTypeTypePropEnum = append(bgpRoutePolicyTypeTypePropEnum, v)
}
}
const (
// BgpRoutePolicyTypeExport captures enum value "export"
BgpRoutePolicyTypeExport string = "export"
// BgpRoutePolicyTypeImport captures enum value "import"
BgpRoutePolicyTypeImport string = "import"
)
// prop value enum
func (m *BgpRoutePolicy) validateTypeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, bgpRoutePolicyTypeTypePropEnum, true); err != nil {
return err
}
return nil
}
func (m *BgpRoutePolicy) validateType(formats strfmt.Registry) error {
if swag.IsZero(m.Type) { // not required
return nil
}
// value enum
if err := m.validateTypeEnum("type", "body", m.Type); err != nil {
return err
}
return nil
}
// ContextValidate validate this bgp route policy based on the context it is used
func (m *BgpRoutePolicy) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateStatements(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BgpRoutePolicy) contextValidateStatements(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Statements); i++ {
if m.Statements[i] != nil {
if swag.IsZero(m.Statements[i]) { // not required
return nil
}
if err := m.Statements[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("statements" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("statements" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *BgpRoutePolicy) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpRoutePolicy) UnmarshalBinary(b []byte) error {
var res BgpRoutePolicy
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
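// Illustrative usage sketch (not part of the generated code): a route policy of
// type "export" or "import" nests statements whose prefix matches and route
// action are validated individually (errors are prefixed with "statements.N").
// The policy name and CIDR below are hypothetical.
func exampleBgpRoutePolicy() error {
	policy := &BgpRoutePolicy{
		Name:      "example-export-policy",
		RouterAsn: 65001,
		Type:      BgpRoutePolicyTypeExport,
		Statements: []*BgpRoutePolicyStatement{
			{
				MatchPrefixes: []*BgpRoutePolicyPrefixMatch{
					{Cidr: "10.0.0.0/8", PrefixLenMin: 8, PrefixLenMax: 32},
				},
				RouteAction: BgpRoutePolicyStatementRouteActionAccept,
			},
		},
	}
	return policy.Validate(strfmt.Default)
}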
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// BgpRoutePolicyPrefixMatch Matches a CIDR prefix in a BGP route policy
//
// swagger:model BgpRoutePolicyPrefixMatch
type BgpRoutePolicyPrefixMatch struct {
// CIDR prefix to match with
Cidr string `json:"cidr,omitempty"`
// Maximal prefix length that will match if it falls under CIDR
PrefixLenMax int64 `json:"prefix-len-max,omitempty"`
// Minimal prefix length that will match if it falls under CIDR
PrefixLenMin int64 `json:"prefix-len-min,omitempty"`
}
// Validate validates this bgp route policy prefix match
func (m *BgpRoutePolicyPrefixMatch) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this bgp route policy prefix match based on context it is used
func (m *BgpRoutePolicyPrefixMatch) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *BgpRoutePolicyPrefixMatch) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpRoutePolicyPrefixMatch) UnmarshalBinary(b []byte) error {
var res BgpRoutePolicyPrefixMatch
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// BgpRoutePolicyStatement Single BGP route policy statement
//
// swagger:model BgpRoutePolicyStatement
type BgpRoutePolicyStatement struct {
// List of BGP standard community values to be added to the matched route
AddCommunities []string `json:"add-communities"`
// List of BGP large community values to be added to the matched route
AddLargeCommunities []string `json:"add-large-communities"`
// Matches any of the provided address families. If empty matches all address families.
MatchFamilies []*BgpFamily `json:"match-families"`
// Matches any of the provided BGP neighbor IP addresses. If empty matches all neighbors.
MatchNeighbors []string `json:"match-neighbors"`
// Matches any of the provided prefixes. If empty matches all prefixes.
MatchPrefixes []*BgpRoutePolicyPrefixMatch `json:"match-prefixes"`
// RIB processing action taken on the matched route
// Enum: ["none","accept","reject"]
RouteAction string `json:"route-action,omitempty"`
// BGP local preference value to be set on the matched route
SetLocalPreference int64 `json:"set-local-preference,omitempty"`
}
// Validate validates this bgp route policy statement
func (m *BgpRoutePolicyStatement) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateMatchFamilies(formats); err != nil {
res = append(res, err)
}
if err := m.validateMatchPrefixes(formats); err != nil {
res = append(res, err)
}
if err := m.validateRouteAction(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BgpRoutePolicyStatement) validateMatchFamilies(formats strfmt.Registry) error {
if swag.IsZero(m.MatchFamilies) { // not required
return nil
}
for i := 0; i < len(m.MatchFamilies); i++ {
if swag.IsZero(m.MatchFamilies[i]) { // not required
continue
}
if m.MatchFamilies[i] != nil {
if err := m.MatchFamilies[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("match-families" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("match-families" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *BgpRoutePolicyStatement) validateMatchPrefixes(formats strfmt.Registry) error {
if swag.IsZero(m.MatchPrefixes) { // not required
return nil
}
for i := 0; i < len(m.MatchPrefixes); i++ {
if swag.IsZero(m.MatchPrefixes[i]) { // not required
continue
}
if m.MatchPrefixes[i] != nil {
if err := m.MatchPrefixes[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("match-prefixes" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("match-prefixes" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
var bgpRoutePolicyStatementTypeRouteActionPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["none","accept","reject"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
bgpRoutePolicyStatementTypeRouteActionPropEnum = append(bgpRoutePolicyStatementTypeRouteActionPropEnum, v)
}
}
const (
// BgpRoutePolicyStatementRouteActionNone captures enum value "none"
BgpRoutePolicyStatementRouteActionNone string = "none"
// BgpRoutePolicyStatementRouteActionAccept captures enum value "accept"
BgpRoutePolicyStatementRouteActionAccept string = "accept"
// BgpRoutePolicyStatementRouteActionReject captures enum value "reject"
BgpRoutePolicyStatementRouteActionReject string = "reject"
)
// prop value enum
func (m *BgpRoutePolicyStatement) validateRouteActionEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, bgpRoutePolicyStatementTypeRouteActionPropEnum, true); err != nil {
return err
}
return nil
}
func (m *BgpRoutePolicyStatement) validateRouteAction(formats strfmt.Registry) error {
if swag.IsZero(m.RouteAction) { // not required
return nil
}
// value enum
if err := m.validateRouteActionEnum("route-action", "body", m.RouteAction); err != nil {
return err
}
return nil
}
// ContextValidate validate this bgp route policy statement based on the context it is used
func (m *BgpRoutePolicyStatement) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateMatchFamilies(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateMatchPrefixes(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *BgpRoutePolicyStatement) contextValidateMatchFamilies(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.MatchFamilies); i++ {
if m.MatchFamilies[i] != nil {
if swag.IsZero(m.MatchFamilies[i]) { // not required
return nil
}
if err := m.MatchFamilies[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("match-families" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("match-families" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *BgpRoutePolicyStatement) contextValidateMatchPrefixes(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.MatchPrefixes); i++ {
if m.MatchPrefixes[i] != nil {
if swag.IsZero(m.MatchPrefixes[i]) { // not required
return nil
}
if err := m.MatchPrefixes[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("match-prefixes" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("match-prefixes" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *BgpRoutePolicyStatement) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *BgpRoutePolicyStatement) UnmarshalBinary(b []byte) error {
var res BgpRoutePolicyStatement
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
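// Illustrative sketch (not part of the go-swagger output): how a caller might
// set the route action on a BgpRoutePolicyStatement using the generated enum
// constant and validate it against the default format registry. The function
// name and field values here are hypothetical examples.
func exampleBgpRoutePolicyStatementValidate() error {
	stmt := &BgpRoutePolicyStatement{
		// One of "none", "accept", "reject"; any other string fails the enum check.
		RouteAction: BgpRoutePolicyStatementRouteActionAccept,
	}
	// Validate checks the enum value and any nested match families/prefixes.
	return stmt.Validate(strfmt.Default)
}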
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// CIDRList List of CIDRs
//
// swagger:model CIDRList
type CIDRList struct {
// list
List []string `json:"list"`
// revision
Revision int64 `json:"revision,omitempty"`
}
// Validate validates this CIDR list
func (m *CIDRList) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this CIDR list based on context it is used
func (m *CIDRList) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *CIDRList) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *CIDRList) UnmarshalBinary(b []byte) error {
var res CIDRList
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
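// Illustrative sketch (not part of the go-swagger output): building a CIDRList
// and round-tripping it through the generated binary marshalling helpers,
// which are thin wrappers around swag's JSON helpers. The function name and
// CIDR values are hypothetical examples.
func exampleCIDRListRoundTrip() (*CIDRList, error) {
	in := &CIDRList{
		List:     []string{"10.0.0.0/8", "192.168.0.0/16"},
		Revision: 3,
	}
	b, err := in.MarshalBinary()
	if err != nil {
		return nil, err
	}
	out := &CIDRList{}
	if err := out.UnmarshalBinary(b); err != nil {
		return nil, err
	}
	return out, nil
}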
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// CIDRPolicy CIDR endpoint policy
//
// swagger:model CIDRPolicy
type CIDRPolicy struct {
// List of CIDR egress rules
Egress []*PolicyRule `json:"egress"`
// List of CIDR ingress rules
Ingress []*PolicyRule `json:"ingress"`
}
// Validate validates this CIDR policy
func (m *CIDRPolicy) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateEgress(formats); err != nil {
res = append(res, err)
}
if err := m.validateIngress(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *CIDRPolicy) validateEgress(formats strfmt.Registry) error {
if swag.IsZero(m.Egress) { // not required
return nil
}
for i := 0; i < len(m.Egress); i++ {
if swag.IsZero(m.Egress[i]) { // not required
continue
}
if m.Egress[i] != nil {
if err := m.Egress[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("egress" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("egress" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *CIDRPolicy) validateIngress(formats strfmt.Registry) error {
if swag.IsZero(m.Ingress) { // not required
return nil
}
for i := 0; i < len(m.Ingress); i++ {
if swag.IsZero(m.Ingress[i]) { // not required
continue
}
if m.Ingress[i] != nil {
if err := m.Ingress[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ingress" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ingress" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this CIDR policy based on the context it is used
func (m *CIDRPolicy) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateEgress(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIngress(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *CIDRPolicy) contextValidateEgress(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Egress); i++ {
if m.Egress[i] != nil {
if swag.IsZero(m.Egress[i]) { // not required
return nil
}
if err := m.Egress[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("egress" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("egress" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *CIDRPolicy) contextValidateIngress(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Ingress); i++ {
if m.Ingress[i] != nil {
if swag.IsZero(m.Ingress[i]) { // not required
return nil
}
if err := m.Ingress[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ingress" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ingress" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *CIDRPolicy) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *CIDRPolicy) UnmarshalBinary(b []byte) error {
var res CIDRPolicy
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
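// Illustrative sketch (not part of the go-swagger output): validating a
// CIDRPolicy whose rule slices reference nested PolicyRule models; Validate
// reports nested failures under an "egress.<i>" or "ingress.<i>" prefix. The
// function name is a hypothetical example, and the entries are left nil/empty
// because the PolicyRule fields are model-specific.
func exampleCIDRPolicyValidate() error {
	policy := &CIDRPolicy{
		Egress:  []*PolicyRule{nil}, // zero entries are skipped by the IsZero check above
		Ingress: nil,                // omitted slices are treated as "not required"
	}
	return policy.Validate(strfmt.Default)
}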
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// CNIChainingStatus Status of CNI chaining
//
// swagger:model CNIChainingStatus
type CNIChainingStatus struct {
// mode
// Enum: ["none","aws-cni","flannel","generic-veth","portmap"]
Mode string `json:"mode,omitempty"`
}
// Validate validates this CNI chaining status
func (m *CNIChainingStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateMode(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var cNIChainingStatusTypeModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["none","aws-cni","flannel","generic-veth","portmap"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
cNIChainingStatusTypeModePropEnum = append(cNIChainingStatusTypeModePropEnum, v)
}
}
const (
// CNIChainingStatusModeNone captures enum value "none"
CNIChainingStatusModeNone string = "none"
// CNIChainingStatusModeAwsDashCni captures enum value "aws-cni"
CNIChainingStatusModeAwsDashCni string = "aws-cni"
// CNIChainingStatusModeFlannel captures enum value "flannel"
CNIChainingStatusModeFlannel string = "flannel"
// CNIChainingStatusModeGenericDashVeth captures enum value "generic-veth"
CNIChainingStatusModeGenericDashVeth string = "generic-veth"
// CNIChainingStatusModePortmap captures enum value "portmap"
CNIChainingStatusModePortmap string = "portmap"
)
// prop value enum
func (m *CNIChainingStatus) validateModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, cNIChainingStatusTypeModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *CNIChainingStatus) validateMode(formats strfmt.Registry) error {
if swag.IsZero(m.Mode) { // not required
return nil
}
// value enum
if err := m.validateModeEnum("mode", "body", m.Mode); err != nil {
return err
}
return nil
}
// ContextValidate validates this CNI chaining status based on context it is used
func (m *CNIChainingStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *CNIChainingStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *CNIChainingStatus) UnmarshalBinary(b []byte) error {
var res CNIChainingStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
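// Illustrative sketch (not part of the go-swagger output): the mode field is
// constrained to the generated enum, so a listed constant passes Validate
// while an arbitrary string such as "my-cni" would return an *errors.Validation
// for the "mode" field. The function name is a hypothetical example.
func exampleCNIChainingStatusValidate() error {
	status := &CNIChainingStatus{Mode: CNIChainingStatusModeAwsDashCni}
	return status.Validate(strfmt.Default)
}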
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// CgroupContainerMetadata cgroup container metadata
//
// swagger:model CgroupContainerMetadata
type CgroupContainerMetadata struct {
// cgroup id
CgroupID uint64 `json:"cgroup-id,omitempty"`
// cgroup path
CgroupPath string `json:"cgroup-path,omitempty"`
}
// Validate validates this cgroup container metadata
func (m *CgroupContainerMetadata) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this cgroup container metadata based on context it is used
func (m *CgroupContainerMetadata) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *CgroupContainerMetadata) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *CgroupContainerMetadata) UnmarshalBinary(b []byte) error {
var res CgroupContainerMetadata
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// CgroupDumpMetadata cgroup full metadata
//
// swagger:model CgroupDumpMetadata
type CgroupDumpMetadata struct {
// pod metadatas
PodMetadatas []*CgroupPodMetadata `json:"pod-metadatas"`
}
// Validate validates this cgroup dump metadata
func (m *CgroupDumpMetadata) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validatePodMetadatas(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *CgroupDumpMetadata) validatePodMetadatas(formats strfmt.Registry) error {
if swag.IsZero(m.PodMetadatas) { // not required
return nil
}
for i := 0; i < len(m.PodMetadatas); i++ {
if swag.IsZero(m.PodMetadatas[i]) { // not required
continue
}
if m.PodMetadatas[i] != nil {
if err := m.PodMetadatas[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("pod-metadatas" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("pod-metadatas" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this cgroup dump metadata based on the context it is used
func (m *CgroupDumpMetadata) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidatePodMetadatas(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *CgroupDumpMetadata) contextValidatePodMetadatas(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.PodMetadatas); i++ {
if m.PodMetadatas[i] != nil {
if swag.IsZero(m.PodMetadatas[i]) { // not required
return nil
}
if err := m.PodMetadatas[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("pod-metadatas" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("pod-metadatas" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *CgroupDumpMetadata) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *CgroupDumpMetadata) UnmarshalBinary(b []byte) error {
var res CgroupDumpMetadata
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// CgroupPodMetadata cgroup pod metadata
//
// swagger:model CgroupPodMetadata
type CgroupPodMetadata struct {
// containers
Containers []*CgroupContainerMetadata `json:"containers"`
// ips
Ips []string `json:"ips"`
// name
Name string `json:"name,omitempty"`
// namespace
Namespace string `json:"namespace,omitempty"`
}
// Validate validates this cgroup pod metadata
func (m *CgroupPodMetadata) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateContainers(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *CgroupPodMetadata) validateContainers(formats strfmt.Registry) error {
if swag.IsZero(m.Containers) { // not required
return nil
}
for i := 0; i < len(m.Containers); i++ {
if swag.IsZero(m.Containers[i]) { // not required
continue
}
if m.Containers[i] != nil {
if err := m.Containers[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("containers" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("containers" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this cgroup pod metadata based on the context it is used
func (m *CgroupPodMetadata) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateContainers(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *CgroupPodMetadata) contextValidateContainers(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Containers); i++ {
if m.Containers[i] != nil {
if swag.IsZero(m.Containers[i]) { // not required
return nil
}
if err := m.Containers[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("containers" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("containers" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *CgroupPodMetadata) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *CgroupPodMetadata) UnmarshalBinary(b []byte) error {
var res CgroupPodMetadata
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
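// Illustrative sketch (not part of the go-swagger output): a CgroupPodMetadata
// with nested container metadata; Validate walks the containers slice and
// prefixes nested errors with "containers.<i>". The pod name, namespace, IP,
// and cgroup values are hypothetical examples.
func exampleCgroupPodMetadataValidate() error {
	pod := &CgroupPodMetadata{
		Name:      "nginx-7d9c",
		Namespace: "default",
		Ips:       []string{"10.0.1.23"},
		Containers: []*CgroupContainerMetadata{
			{CgroupID: 12345, CgroupPath: "/sys/fs/cgroup/kubepods/pod1234/abcd"},
		},
	}
	return pod.Validate(strfmt.Default)
}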
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// ClockSource Status of BPF clock source
//
// swagger:model ClockSource
type ClockSource struct {
// Kernel Hz
Hertz int64 `json:"hertz,omitempty"`
// Datapath clock source
// Enum: ["ktime","jiffies"]
Mode string `json:"mode,omitempty"`
}
// Validate validates this clock source
func (m *ClockSource) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateMode(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var clockSourceTypeModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["ktime","jiffies"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
clockSourceTypeModePropEnum = append(clockSourceTypeModePropEnum, v)
}
}
const (
// ClockSourceModeKtime captures enum value "ktime"
ClockSourceModeKtime string = "ktime"
// ClockSourceModeJiffies captures enum value "jiffies"
ClockSourceModeJiffies string = "jiffies"
)
// prop value enum
func (m *ClockSource) validateModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, clockSourceTypeModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *ClockSource) validateMode(formats strfmt.Registry) error {
if swag.IsZero(m.Mode) { // not required
return nil
}
// value enum
if err := m.validateModeEnum("mode", "body", m.Mode); err != nil {
return err
}
return nil
}
// ContextValidate validates this clock source based on context it is used
func (m *ClockSource) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *ClockSource) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ClockSource) UnmarshalBinary(b []byte) error {
var res ClockSource
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
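// Illustrative sketch (not part of the go-swagger output): a ClockSource using
// the jiffies datapath clock, where Hertz carries the kernel tick rate. The
// function name and the 250 Hz value are hypothetical examples.
func exampleClockSourceValidate() error {
	src := &ClockSource{
		Mode:  ClockSourceModeJiffies, // "ktime" or "jiffies"
		Hertz: 250,
	}
	return src.Validate(strfmt.Default)
}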
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ClusterMeshStatus Status of ClusterMesh
//
// +k8s:deepcopy-gen=true
//
// swagger:model ClusterMeshStatus
type ClusterMeshStatus struct {
// List of remote clusters
Clusters []*RemoteCluster `json:"clusters"`
// Number of global services
NumGlobalServices int64 `json:"num-global-services,omitempty"`
}
// Validate validates this cluster mesh status
func (m *ClusterMeshStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateClusters(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ClusterMeshStatus) validateClusters(formats strfmt.Registry) error {
if swag.IsZero(m.Clusters) { // not required
return nil
}
for i := 0; i < len(m.Clusters); i++ {
if swag.IsZero(m.Clusters[i]) { // not required
continue
}
if m.Clusters[i] != nil {
if err := m.Clusters[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("clusters" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("clusters" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this cluster mesh status based on the context it is used
func (m *ClusterMeshStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateClusters(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ClusterMeshStatus) contextValidateClusters(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Clusters); i++ {
if m.Clusters[i] != nil {
if swag.IsZero(m.Clusters[i]) { // not required
return nil
}
if err := m.Clusters[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("clusters" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("clusters" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *ClusterMeshStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ClusterMeshStatus) UnmarshalBinary(b []byte) error {
var res ClusterMeshStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
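// Illustrative sketch (not part of the go-swagger output): validating a
// ClusterMeshStatus; remote cluster entries are validated individually and
// reported under "clusters.<i>". The function name is a hypothetical example,
// and the Clusters slice is left empty because the RemoteCluster fields are
// model-specific.
func exampleClusterMeshStatusValidate() error {
	status := &ClusterMeshStatus{
		NumGlobalServices: 7,
		Clusters:          []*RemoteCluster{},
	}
	return status.Validate(strfmt.Default)
}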
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ClusterNodeStatus Status of cluster
//
// swagger:model ClusterNodeStatus
type ClusterNodeStatus struct {
// ID that should be used by the client to receive a diff from the previous request
ClientID int64 `json:"client-id,omitempty"`
// List of known nodes
NodesAdded []*NodeElement `json:"nodes-added"`
// List of known nodes
NodesRemoved []*NodeElement `json:"nodes-removed"`
// Name of local node (if available)
Self string `json:"self,omitempty"`
}
// Validate validates this cluster node status
func (m *ClusterNodeStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateNodesAdded(formats); err != nil {
res = append(res, err)
}
if err := m.validateNodesRemoved(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ClusterNodeStatus) validateNodesAdded(formats strfmt.Registry) error {
if swag.IsZero(m.NodesAdded) { // not required
return nil
}
for i := 0; i < len(m.NodesAdded); i++ {
if swag.IsZero(m.NodesAdded[i]) { // not required
continue
}
if m.NodesAdded[i] != nil {
if err := m.NodesAdded[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodes-added" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodes-added" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *ClusterNodeStatus) validateNodesRemoved(formats strfmt.Registry) error {
if swag.IsZero(m.NodesRemoved) { // not required
return nil
}
for i := 0; i < len(m.NodesRemoved); i++ {
if swag.IsZero(m.NodesRemoved[i]) { // not required
continue
}
if m.NodesRemoved[i] != nil {
if err := m.NodesRemoved[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodes-removed" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodes-removed" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this cluster node status based on the context it is used
func (m *ClusterNodeStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateNodesAdded(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNodesRemoved(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ClusterNodeStatus) contextValidateNodesAdded(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.NodesAdded); i++ {
if m.NodesAdded[i] != nil {
if swag.IsZero(m.NodesAdded[i]) { // not required
return nil
}
if err := m.NodesAdded[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodes-added" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodes-added" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *ClusterNodeStatus) contextValidateNodesRemoved(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.NodesRemoved); i++ {
if m.NodesRemoved[i] != nil {
if swag.IsZero(m.NodesRemoved[i]) { // not required
return nil
}
if err := m.NodesRemoved[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodes-removed" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodes-removed" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *ClusterNodeStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ClusterNodeStatus) UnmarshalBinary(b []byte) error {
var res ClusterNodeStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ClusterNodesResponse cluster nodes response
//
// swagger:model ClusterNodesResponse
type ClusterNodesResponse struct {
// List of known nodes
Nodes []*NodeElement `json:"nodes"`
// Name of local node (if available)
Self string `json:"self,omitempty"`
}
// Validate validates this cluster nodes response
func (m *ClusterNodesResponse) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateNodes(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ClusterNodesResponse) validateNodes(formats strfmt.Registry) error {
if swag.IsZero(m.Nodes) { // not required
return nil
}
for i := 0; i < len(m.Nodes); i++ {
if swag.IsZero(m.Nodes[i]) { // not required
continue
}
if m.Nodes[i] != nil {
if err := m.Nodes[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodes" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodes" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this cluster nodes response based on the context it is used
func (m *ClusterNodesResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateNodes(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ClusterNodesResponse) contextValidateNodes(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Nodes); i++ {
if m.Nodes[i] != nil {
if swag.IsZero(m.Nodes[i]) { // not required
return nil
}
if err := m.Nodes[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodes" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodes" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *ClusterNodesResponse) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ClusterNodesResponse) UnmarshalBinary(b []byte) error {
var res ClusterNodesResponse
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ClusterStatus Status of cluster
//
// +k8s:deepcopy-gen=true
//
// swagger:model ClusterStatus
type ClusterStatus struct {
// Status of local cilium-health daemon
CiliumHealth *Status `json:"ciliumHealth,omitempty"`
// List of known nodes
Nodes []*NodeElement `json:"nodes"`
// Name of local node (if available)
Self string `json:"self,omitempty"`
}
// Validate validates this cluster status
func (m *ClusterStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateCiliumHealth(formats); err != nil {
res = append(res, err)
}
if err := m.validateNodes(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ClusterStatus) validateCiliumHealth(formats strfmt.Registry) error {
if swag.IsZero(m.CiliumHealth) { // not required
return nil
}
if m.CiliumHealth != nil {
if err := m.CiliumHealth.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ciliumHealth")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ciliumHealth")
}
return err
}
}
return nil
}
func (m *ClusterStatus) validateNodes(formats strfmt.Registry) error {
if swag.IsZero(m.Nodes) { // not required
return nil
}
for i := 0; i < len(m.Nodes); i++ {
if swag.IsZero(m.Nodes[i]) { // not required
continue
}
if m.Nodes[i] != nil {
if err := m.Nodes[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodes" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodes" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this cluster status based on the context it is used
func (m *ClusterStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateCiliumHealth(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNodes(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ClusterStatus) contextValidateCiliumHealth(ctx context.Context, formats strfmt.Registry) error {
if m.CiliumHealth != nil {
if swag.IsZero(m.CiliumHealth) { // not required
return nil
}
if err := m.CiliumHealth.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ciliumHealth")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ciliumHealth")
}
return err
}
}
return nil
}
func (m *ClusterStatus) contextValidateNodes(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Nodes); i++ {
if m.Nodes[i] != nil {
if swag.IsZero(m.Nodes[i]) { // not required
return nil
}
if err := m.Nodes[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodes" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodes" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *ClusterStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ClusterStatus) UnmarshalBinary(b []byte) error {
var res ClusterStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
)
// ConfigurationMap Map of configuration key/value pairs.
//
// swagger:model ConfigurationMap
type ConfigurationMap map[string]string
// Validate validates this configuration map
func (m ConfigurationMap) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this configuration map based on context it is used
func (m ConfigurationMap) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
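// Illustrative sketch (not part of the go-swagger output): ConfigurationMap is
// a plain map[string]string, so it can be built with a literal; Validate is a
// no-op for this model. The keys and values are hypothetical examples.
func exampleConfigurationMapValidate() error {
	opts := ConfigurationMap{
		"Debug":             "Enabled",
		"PolicyEnforcement": "default",
	}
	return opts.Validate(strfmt.Default)
}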
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// ControllerStatus Status of a controller
//
// +k8s:deepcopy-gen=true
//
// swagger:model ControllerStatus
type ControllerStatus struct {
// configuration
Configuration *ControllerStatusConfiguration `json:"configuration,omitempty"`
// Name of controller
Name string `json:"name,omitempty"`
// status
Status *ControllerStatusStatus `json:"status,omitempty"`
// UUID of controller
// Format: uuid
UUID strfmt.UUID `json:"uuid,omitempty"`
}
// Validate validates this controller status
func (m *ControllerStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateConfiguration(formats); err != nil {
res = append(res, err)
}
if err := m.validateStatus(formats); err != nil {
res = append(res, err)
}
if err := m.validateUUID(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ControllerStatus) validateConfiguration(formats strfmt.Registry) error {
if swag.IsZero(m.Configuration) { // not required
return nil
}
if m.Configuration != nil {
if err := m.Configuration.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("configuration")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("configuration")
}
return err
}
}
return nil
}
func (m *ControllerStatus) validateStatus(formats strfmt.Registry) error {
if swag.IsZero(m.Status) { // not required
return nil
}
if m.Status != nil {
if err := m.Status.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
func (m *ControllerStatus) validateUUID(formats strfmt.Registry) error {
if swag.IsZero(m.UUID) { // not required
return nil
}
if err := validate.FormatOf("uuid", "body", "uuid", m.UUID.String(), formats); err != nil {
return err
}
return nil
}
// ContextValidate validate this controller status based on the context it is used
func (m *ControllerStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateConfiguration(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateStatus(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ControllerStatus) contextValidateConfiguration(ctx context.Context, formats strfmt.Registry) error {
if m.Configuration != nil {
if swag.IsZero(m.Configuration) { // not required
return nil
}
if err := m.Configuration.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("configuration")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("configuration")
}
return err
}
}
return nil
}
func (m *ControllerStatus) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
if m.Status != nil {
if swag.IsZero(m.Status) { // not required
return nil
}
if err := m.Status.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *ControllerStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ControllerStatus) UnmarshalBinary(b []byte) error {
var res ControllerStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// ControllerStatusConfiguration Configuration of controller
//
// +deepequal-gen=true
// +k8s:deepcopy-gen=true
//
// swagger:model ControllerStatusConfiguration
type ControllerStatusConfiguration struct {
// Retry on error
ErrorRetry bool `json:"error-retry,omitempty"`
// Base error retry back-off time
// Format: duration
ErrorRetryBase strfmt.Duration `json:"error-retry-base,omitempty"`
// Regular synchronization interval
// Format: duration
Interval strfmt.Duration `json:"interval,omitempty"`
}
// Validate validates this controller status configuration
func (m *ControllerStatusConfiguration) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateErrorRetryBase(formats); err != nil {
res = append(res, err)
}
if err := m.validateInterval(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ControllerStatusConfiguration) validateErrorRetryBase(formats strfmt.Registry) error {
if swag.IsZero(m.ErrorRetryBase) { // not required
return nil
}
if err := validate.FormatOf("configuration"+"."+"error-retry-base", "body", "duration", m.ErrorRetryBase.String(), formats); err != nil {
return err
}
return nil
}
func (m *ControllerStatusConfiguration) validateInterval(formats strfmt.Registry) error {
if swag.IsZero(m.Interval) { // not required
return nil
}
if err := validate.FormatOf("configuration"+"."+"interval", "body", "duration", m.Interval.String(), formats); err != nil {
return err
}
return nil
}
// ContextValidate validates this controller status configuration based on context it is used
func (m *ControllerStatusConfiguration) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *ControllerStatusConfiguration) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ControllerStatusConfiguration) UnmarshalBinary(b []byte) error {
var res ControllerStatusConfiguration
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// ControllerStatusStatus Current status of controller
//
// +k8s:deepcopy-gen=true
//
// swagger:model ControllerStatusStatus
type ControllerStatusStatus struct {
// Number of consecutive errors since last success
ConsecutiveFailureCount int64 `json:"consecutive-failure-count,omitempty"`
// Total number of failed runs
FailureCount int64 `json:"failure-count,omitempty"`
// Error message of last failed run
LastFailureMsg string `json:"last-failure-msg,omitempty"`
// Timestamp of last error
// Format: date-time
LastFailureTimestamp strfmt.DateTime `json:"last-failure-timestamp,omitempty"`
// Timestamp of last success
// Format: date-time
LastSuccessTimestamp strfmt.DateTime `json:"last-success-timestamp,omitempty"`
// Total number of successful runs
SuccessCount int64 `json:"success-count,omitempty"`
}
// Validate validates this controller status status
func (m *ControllerStatusStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateLastFailureTimestamp(formats); err != nil {
res = append(res, err)
}
if err := m.validateLastSuccessTimestamp(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ControllerStatusStatus) validateLastFailureTimestamp(formats strfmt.Registry) error {
if swag.IsZero(m.LastFailureTimestamp) { // not required
return nil
}
if err := validate.FormatOf("status"+"."+"last-failure-timestamp", "body", "date-time", m.LastFailureTimestamp.String(), formats); err != nil {
return err
}
return nil
}
func (m *ControllerStatusStatus) validateLastSuccessTimestamp(formats strfmt.Registry) error {
if swag.IsZero(m.LastSuccessTimestamp) { // not required
return nil
}
if err := validate.FormatOf("status"+"."+"last-success-timestamp", "body", "date-time", m.LastSuccessTimestamp.String(), formats); err != nil {
return err
}
return nil
}
// ContextValidate validates this controller status status based on context it is used
func (m *ControllerStatusStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *ControllerStatusStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ControllerStatusStatus) UnmarshalBinary(b []byte) error {
var res ControllerStatusStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
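// Illustrative sketch (not part of the go-swagger output): a ControllerStatus
// with its nested status; the uuid field is checked against the "uuid" format
// and the nested duration/date-time fields against their respective formats in
// the registry. The function name, controller name, and UUID string are
// hypothetical examples.
func exampleControllerStatusValidate() error {
	cs := &ControllerStatus{
		Name: "sync-to-k8s-ciliumendpoint",
		UUID: strfmt.UUID("7e6437a2-4f0c-4cbb-8a8e-1d7f6a3a2f10"),
		Status: &ControllerStatusStatus{
			SuccessCount: 42,
		},
	}
	// A malformed UUID would be rejected by validate.FormatOf under "uuid".
	return cs.Validate(strfmt.Default)
}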
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ControllerStatuses Collection of controller statuses
//
// swagger:model ControllerStatuses
type ControllerStatuses []*ControllerStatus
// Validate validates this controller statuses
func (m ControllerStatuses) Validate(formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if swag.IsZero(m[i]) { // not required
continue
}
if m[i] != nil {
if err := m[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validate this controller statuses based on the context it is used
func (m ControllerStatuses) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if m[i] != nil {
if swag.IsZero(m[i]) { // not required
return nil
}
if err := m[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
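// Illustrative sketch (not part of the go-swagger output): ControllerStatuses
// is a named slice type, so Validate iterates the elements directly and
// reports failures under the element index. The function name and entry are
// hypothetical examples.
func exampleControllerStatusesValidate() error {
	statuses := ControllerStatuses{
		{Name: "ipcache-bpf-garbage-collection"},
		nil, // nil entries are skipped by the IsZero check above
	}
	return statuses.Validate(strfmt.Default)
}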
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// DaemonConfiguration Response to a daemon configuration request.
//
// swagger:model DaemonConfiguration
type DaemonConfiguration struct {
// Changeable configuration
Spec *DaemonConfigurationSpec `json:"spec,omitempty"`
// Current daemon configuration related status. Contains the addressing
// information, k8s, node monitor and immutable and mutable
// configuration settings.
//
Status *DaemonConfigurationStatus `json:"status,omitempty"`
}
// Validate validates this daemon configuration
func (m *DaemonConfiguration) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateSpec(formats); err != nil {
res = append(res, err)
}
if err := m.validateStatus(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DaemonConfiguration) validateSpec(formats strfmt.Registry) error {
if swag.IsZero(m.Spec) { // not required
return nil
}
if m.Spec != nil {
if err := m.Spec.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *DaemonConfiguration) validateStatus(formats strfmt.Registry) error {
if swag.IsZero(m.Status) { // not required
return nil
}
if m.Status != nil {
if err := m.Status.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// ContextValidate validate this daemon configuration based on the context it is used
func (m *DaemonConfiguration) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateSpec(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateStatus(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DaemonConfiguration) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error {
if m.Spec != nil {
if swag.IsZero(m.Spec) { // not required
return nil
}
if err := m.Spec.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *DaemonConfiguration) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
if m.Status != nil {
if swag.IsZero(m.Status) { // not required
return nil
}
if err := m.Status.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *DaemonConfiguration) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *DaemonConfiguration) UnmarshalBinary(b []byte) error {
var res DaemonConfiguration
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// DaemonConfigurationSpec The controllable configuration of the daemon.
//
// swagger:model DaemonConfigurationSpec
type DaemonConfigurationSpec struct {
// Changeable configuration
Options ConfigurationMap `json:"options,omitempty"`
// The policy-enforcement mode
// Enum: ["default","always","never"]
PolicyEnforcement string `json:"policy-enforcement,omitempty"`
}
// Validate validates this daemon configuration spec
func (m *DaemonConfigurationSpec) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateOptions(formats); err != nil {
res = append(res, err)
}
if err := m.validatePolicyEnforcement(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DaemonConfigurationSpec) validateOptions(formats strfmt.Registry) error {
if swag.IsZero(m.Options) { // not required
return nil
}
if m.Options != nil {
if err := m.Options.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("options")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("options")
}
return err
}
}
return nil
}
var daemonConfigurationSpecTypePolicyEnforcementPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["default","always","never"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
daemonConfigurationSpecTypePolicyEnforcementPropEnum = append(daemonConfigurationSpecTypePolicyEnforcementPropEnum, v)
}
}
const (
// DaemonConfigurationSpecPolicyEnforcementDefault captures enum value "default"
DaemonConfigurationSpecPolicyEnforcementDefault string = "default"
// DaemonConfigurationSpecPolicyEnforcementAlways captures enum value "always"
DaemonConfigurationSpecPolicyEnforcementAlways string = "always"
// DaemonConfigurationSpecPolicyEnforcementNever captures enum value "never"
DaemonConfigurationSpecPolicyEnforcementNever string = "never"
)
// prop value enum
func (m *DaemonConfigurationSpec) validatePolicyEnforcementEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, daemonConfigurationSpecTypePolicyEnforcementPropEnum, true); err != nil {
return err
}
return nil
}
func (m *DaemonConfigurationSpec) validatePolicyEnforcement(formats strfmt.Registry) error {
if swag.IsZero(m.PolicyEnforcement) { // not required
return nil
}
// value enum
if err := m.validatePolicyEnforcementEnum("policy-enforcement", "body", m.PolicyEnforcement); err != nil {
return err
}
return nil
}
// ContextValidate validate this daemon configuration spec based on the context it is used
func (m *DaemonConfigurationSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateOptions(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DaemonConfigurationSpec) contextValidateOptions(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.Options) { // not required
return nil
}
if err := m.Options.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("options")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("options")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *DaemonConfigurationSpec) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *DaemonConfigurationSpec) UnmarshalBinary(b []byte) error {
var res DaemonConfigurationSpec
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
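// Illustrative sketch (not part of the go-swagger output): a
// DaemonConfigurationSpec combining a ConfigurationMap of mutable options with
// the policy-enforcement enum; values outside "default", "always", "never"
// fail Validate. The function name and option values are hypothetical examples.
func exampleDaemonConfigurationSpecValidate() error {
	spec := &DaemonConfigurationSpec{
		Options: ConfigurationMap{
			"PolicyTracing": "Enabled",
		},
		PolicyEnforcement: DaemonConfigurationSpecPolicyEnforcementDefault,
	}
	return spec.Validate(strfmt.Default)
}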
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// DaemonConfigurationStatus Response to a daemon configuration request. Contains the addressing
// information, k8s, node monitor and immutable and mutable configuration
// settings.
//
// swagger:model DaemonConfigurationStatus
type DaemonConfigurationStatus struct {
// Maximum IPv4 GRO size on workload facing devices
GROIPV4MaxSize int64 `json:"GROIPv4MaxSize,omitempty"`
// Maximum IPv6 GRO size on workload facing devices
GROMaxSize int64 `json:"GROMaxSize,omitempty"`
// Maximum IPv4 GSO size on workload facing devices
GSOIPV4MaxSize int64 `json:"GSOIPv4MaxSize,omitempty"`
// Maximum IPv6 GSO size on workload facing devices
GSOMaxSize int64 `json:"GSOMaxSize,omitempty"`
// addressing
Addressing *NodeAddressing `json:"addressing,omitempty"`
// Config map which contains all the active daemon configurations
DaemonConfigurationMap map[string]interface{} `json:"daemonConfigurationMap,omitempty"`
// datapath mode
DatapathMode DatapathMode `json:"datapathMode,omitempty"`
// MTU on workload facing devices
DeviceMTU int64 `json:"deviceMTU,omitempty"`
// Configured compatibility mode for --egress-multi-home-ip-rule-compat
EgressMultiHomeIPRuleCompat bool `json:"egress-multi-home-ip-rule-compat,omitempty"`
// Enable route MTU for pod netns when CNI chaining is used
EnableRouteMTUForCNIChaining bool `json:"enableRouteMTUForCNIChaining,omitempty"`
// Immutable configuration (read-only)
Immutable ConfigurationMap `json:"immutable,omitempty"`
// Install ingress/egress routes through uplink on host for Pods when working with
// delegated IPAM plugin.
//
InstallUplinkRoutesForDelegatedIPAM bool `json:"installUplinkRoutesForDelegatedIPAM,omitempty"`
// Comma-separated list of IP ports that should be reserved in the workload network namespace
IPLocalReservedPorts string `json:"ipLocalReservedPorts,omitempty"`
// Configured IPAM mode
IpamMode string `json:"ipam-mode,omitempty"`
// k8s configuration
K8sConfiguration string `json:"k8s-configuration,omitempty"`
// k8s endpoint
K8sEndpoint string `json:"k8s-endpoint,omitempty"`
// kvstore configuration
KvstoreConfiguration *KVstoreConfiguration `json:"kvstoreConfiguration,omitempty"`
// masquerade
Masquerade bool `json:"masquerade,omitempty"`
// masquerade protocols
MasqueradeProtocols *DaemonConfigurationStatusMasqueradeProtocols `json:"masqueradeProtocols,omitempty"`
// Status of the node monitor
NodeMonitor *MonitorStatus `json:"nodeMonitor,omitempty"`
// Currently applied configuration
Realized *DaemonConfigurationSpec `json:"realized,omitempty"`
// MTU for network facing routes
RouteMTU int64 `json:"routeMTU,omitempty"`
}
// Validate validates this daemon configuration status
func (m *DaemonConfigurationStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateAddressing(formats); err != nil {
res = append(res, err)
}
if err := m.validateDatapathMode(formats); err != nil {
res = append(res, err)
}
if err := m.validateImmutable(formats); err != nil {
res = append(res, err)
}
if err := m.validateKvstoreConfiguration(formats); err != nil {
res = append(res, err)
}
if err := m.validateMasqueradeProtocols(formats); err != nil {
res = append(res, err)
}
if err := m.validateNodeMonitor(formats); err != nil {
res = append(res, err)
}
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DaemonConfigurationStatus) validateAddressing(formats strfmt.Registry) error {
if swag.IsZero(m.Addressing) { // not required
return nil
}
if m.Addressing != nil {
if err := m.Addressing.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("addressing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("addressing")
}
return err
}
}
return nil
}
func (m *DaemonConfigurationStatus) validateDatapathMode(formats strfmt.Registry) error {
if swag.IsZero(m.DatapathMode) { // not required
return nil
}
if err := m.DatapathMode.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("datapathMode")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("datapathMode")
}
return err
}
return nil
}
func (m *DaemonConfigurationStatus) validateImmutable(formats strfmt.Registry) error {
if swag.IsZero(m.Immutable) { // not required
return nil
}
if m.Immutable != nil {
if err := m.Immutable.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("immutable")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("immutable")
}
return err
}
}
return nil
}
func (m *DaemonConfigurationStatus) validateKvstoreConfiguration(formats strfmt.Registry) error {
if swag.IsZero(m.KvstoreConfiguration) { // not required
return nil
}
if m.KvstoreConfiguration != nil {
if err := m.KvstoreConfiguration.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("kvstoreConfiguration")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("kvstoreConfiguration")
}
return err
}
}
return nil
}
func (m *DaemonConfigurationStatus) validateMasqueradeProtocols(formats strfmt.Registry) error {
if swag.IsZero(m.MasqueradeProtocols) { // not required
return nil
}
if m.MasqueradeProtocols != nil {
if err := m.MasqueradeProtocols.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("masqueradeProtocols")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("masqueradeProtocols")
}
return err
}
}
return nil
}
func (m *DaemonConfigurationStatus) validateNodeMonitor(formats strfmt.Registry) error {
if swag.IsZero(m.NodeMonitor) { // not required
return nil
}
if m.NodeMonitor != nil {
if err := m.NodeMonitor.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodeMonitor")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodeMonitor")
}
return err
}
}
return nil
}
func (m *DaemonConfigurationStatus) validateRealized(formats strfmt.Registry) error {
if swag.IsZero(m.Realized) { // not required
return nil
}
if m.Realized != nil {
if err := m.Realized.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// ContextValidate validate this daemon configuration status based on the context it is used
func (m *DaemonConfigurationStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateAddressing(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateDatapathMode(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateImmutable(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateKvstoreConfiguration(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateMasqueradeProtocols(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNodeMonitor(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateRealized(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DaemonConfigurationStatus) contextValidateAddressing(ctx context.Context, formats strfmt.Registry) error {
if m.Addressing != nil {
if swag.IsZero(m.Addressing) { // not required
return nil
}
if err := m.Addressing.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("addressing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("addressing")
}
return err
}
}
return nil
}
func (m *DaemonConfigurationStatus) contextValidateDatapathMode(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.DatapathMode) { // not required
return nil
}
if err := m.DatapathMode.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("datapathMode")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("datapathMode")
}
return err
}
return nil
}
func (m *DaemonConfigurationStatus) contextValidateImmutable(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.Immutable) { // not required
return nil
}
if err := m.Immutable.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("immutable")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("immutable")
}
return err
}
return nil
}
func (m *DaemonConfigurationStatus) contextValidateKvstoreConfiguration(ctx context.Context, formats strfmt.Registry) error {
if m.KvstoreConfiguration != nil {
if swag.IsZero(m.KvstoreConfiguration) { // not required
return nil
}
if err := m.KvstoreConfiguration.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("kvstoreConfiguration")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("kvstoreConfiguration")
}
return err
}
}
return nil
}
func (m *DaemonConfigurationStatus) contextValidateMasqueradeProtocols(ctx context.Context, formats strfmt.Registry) error {
if m.MasqueradeProtocols != nil {
if swag.IsZero(m.MasqueradeProtocols) { // not required
return nil
}
if err := m.MasqueradeProtocols.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("masqueradeProtocols")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("masqueradeProtocols")
}
return err
}
}
return nil
}
func (m *DaemonConfigurationStatus) contextValidateNodeMonitor(ctx context.Context, formats strfmt.Registry) error {
if m.NodeMonitor != nil {
if swag.IsZero(m.NodeMonitor) { // not required
return nil
}
if err := m.NodeMonitor.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodeMonitor")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodeMonitor")
}
return err
}
}
return nil
}
func (m *DaemonConfigurationStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *DaemonConfigurationStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *DaemonConfigurationStatus) UnmarshalBinary(b []byte) error {
var res DaemonConfigurationStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// DaemonConfigurationStatusMasqueradeProtocols Status of masquerading feature
//
// swagger:model DaemonConfigurationStatusMasqueradeProtocols
type DaemonConfigurationStatusMasqueradeProtocols struct {
// Status of masquerading for IPv4 traffic
IPV4 bool `json:"ipv4,omitempty"`
// Status of masquerading for IPv6 traffic
IPV6 bool `json:"ipv6,omitempty"`
}
// Validate validates this daemon configuration status masquerade protocols
func (m *DaemonConfigurationStatusMasqueradeProtocols) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this daemon configuration status masquerade protocols based on context it is used
func (m *DaemonConfigurationStatusMasqueradeProtocols) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *DaemonConfigurationStatusMasqueradeProtocols) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *DaemonConfigurationStatusMasqueradeProtocols) UnmarshalBinary(b []byte) error {
var res DaemonConfigurationStatusMasqueradeProtocols
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
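// Example usage (hand-written sketch, NOT generated by go-swagger): a minimal
// illustration of how a DaemonConfigurationStatus could be populated, validated
// against the default format registry, and serialized with the same swag helper
// that the generated MarshalBinary relies on. It assumes the MasqueradeProtocols
// field points at the DaemonConfigurationStatusMasqueradeProtocols sub-struct
// generated above; the package/import layout here is illustrative only.
package models
import (
	"fmt"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
)
// exampleDaemonConfigurationStatus is a hypothetical helper, not part of the API.
func exampleDaemonConfigurationStatus() {
	status := &DaemonConfigurationStatus{
		// DatapathModeVeth is one of the enum constants generated below.
		DatapathMode: DatapathModeVeth,
		RouteMTU:     1450,
		MasqueradeProtocols: &DaemonConfigurationStatusMasqueradeProtocols{
			IPV4: true,
		},
	}
	// Validate walks the nested sub-structs; a bad enum value or malformed
	// nested field comes back wrapped in a CompositeValidationError.
	if err := status.Validate(strfmt.Default); err != nil {
		fmt.Println("validation failed:", err)
		return
	}
	// swag.WriteJSON is the same helper the generated MarshalBinary uses.
	out, _ := swag.WriteJSON(status)
	fmt.Println(string(out))
}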
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/validate"
)
// DatapathMode Datapath mode
//
// swagger:model DatapathMode
type DatapathMode string
func NewDatapathMode(value DatapathMode) *DatapathMode {
return &value
}
// Pointer returns a pointer to a freshly-allocated DatapathMode.
func (m DatapathMode) Pointer() *DatapathMode {
return &m
}
const (
// DatapathModeVeth captures enum value "veth"
DatapathModeVeth DatapathMode = "veth"
// DatapathModeNetkit captures enum value "netkit"
DatapathModeNetkit DatapathMode = "netkit"
// DatapathModeNetkitDashL2 captures enum value "netkit-l2"
DatapathModeNetkitDashL2 DatapathMode = "netkit-l2"
)
// for schema
var datapathModeEnum []interface{}
func init() {
var res []DatapathMode
if err := json.Unmarshal([]byte(`["veth","netkit","netkit-l2"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
datapathModeEnum = append(datapathModeEnum, v)
}
}
func (m DatapathMode) validateDatapathModeEnum(path, location string, value DatapathMode) error {
if err := validate.EnumCase(path, location, value, datapathModeEnum, true); err != nil {
return err
}
return nil
}
// Validate validates this datapath mode
func (m DatapathMode) Validate(formats strfmt.Registry) error {
var res []error
// value enum
if err := m.validateDatapathModeEnum("", "body", m); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validates this datapath mode based on context it is used
func (m DatapathMode) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
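// Example usage (hand-written sketch, NOT generated by go-swagger): the
// DatapathMode enum only accepts the values listed in its enum slice, so the
// generated constants validate cleanly while an arbitrary string is rejected by
// validate.EnumCase. The package/import layout is illustrative only.
package models
import (
	"fmt"
	"github.com/go-openapi/strfmt"
)
// exampleDatapathModeValidation is a hypothetical helper, not part of the API.
func exampleDatapathModeValidation() {
	// Known value: passes the enum check.
	if err := DatapathModeNetkit.Validate(strfmt.Default); err != nil {
		fmt.Println("unexpected error:", err)
	}
	// Unknown value: rejected by validate.EnumCase inside Validate.
	bogus := NewDatapathMode("ipvlan")
	if err := bogus.Validate(strfmt.Default); err != nil {
		fmt.Println("rejected:", err)
	}
}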
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// DebugInfo groups some debugging related information on the agent
//
// swagger:model DebugInfo
type DebugInfo struct {
// cilium memory map
CiliumMemoryMap string `json:"cilium-memory-map,omitempty"`
// cilium nodemonitor memory map
CiliumNodemonitorMemoryMap string `json:"cilium-nodemonitor-memory-map,omitempty"`
// cilium status
CiliumStatus *StatusResponse `json:"cilium-status,omitempty"`
// cilium version
CiliumVersion string `json:"cilium-version,omitempty"`
// encryption
Encryption *DebugInfoEncryption `json:"encryption,omitempty"`
// endpoint list
EndpointList []*Endpoint `json:"endpoint-list"`
// environment variables
EnvironmentVariables []string `json:"environment-variables"`
// kernel version
KernelVersion string `json:"kernel-version,omitempty"`
// policy
Policy *Policy `json:"policy,omitempty"`
// service list
ServiceList []*Service `json:"service-list"`
// subsystem
Subsystem map[string]string `json:"subsystem,omitempty"`
}
// Validate validates this debug info
func (m *DebugInfo) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateCiliumStatus(formats); err != nil {
res = append(res, err)
}
if err := m.validateEncryption(formats); err != nil {
res = append(res, err)
}
if err := m.validateEndpointList(formats); err != nil {
res = append(res, err)
}
if err := m.validatePolicy(formats); err != nil {
res = append(res, err)
}
if err := m.validateServiceList(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DebugInfo) validateCiliumStatus(formats strfmt.Registry) error {
if swag.IsZero(m.CiliumStatus) { // not required
return nil
}
if m.CiliumStatus != nil {
if err := m.CiliumStatus.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cilium-status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cilium-status")
}
return err
}
}
return nil
}
func (m *DebugInfo) validateEncryption(formats strfmt.Registry) error {
if swag.IsZero(m.Encryption) { // not required
return nil
}
if m.Encryption != nil {
if err := m.Encryption.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("encryption")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("encryption")
}
return err
}
}
return nil
}
func (m *DebugInfo) validateEndpointList(formats strfmt.Registry) error {
if swag.IsZero(m.EndpointList) { // not required
return nil
}
for i := 0; i < len(m.EndpointList); i++ {
if swag.IsZero(m.EndpointList[i]) { // not required
continue
}
if m.EndpointList[i] != nil {
if err := m.EndpointList[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("endpoint-list" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("endpoint-list" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *DebugInfo) validatePolicy(formats strfmt.Registry) error {
if swag.IsZero(m.Policy) { // not required
return nil
}
if m.Policy != nil {
if err := m.Policy.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("policy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("policy")
}
return err
}
}
return nil
}
func (m *DebugInfo) validateServiceList(formats strfmt.Registry) error {
if swag.IsZero(m.ServiceList) { // not required
return nil
}
for i := 0; i < len(m.ServiceList); i++ {
if swag.IsZero(m.ServiceList[i]) { // not required
continue
}
if m.ServiceList[i] != nil {
if err := m.ServiceList[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("service-list" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("service-list" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this debug info based on the context it is used
func (m *DebugInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateCiliumStatus(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateEncryption(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateEndpointList(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidatePolicy(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateServiceList(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DebugInfo) contextValidateCiliumStatus(ctx context.Context, formats strfmt.Registry) error {
if m.CiliumStatus != nil {
if swag.IsZero(m.CiliumStatus) { // not required
return nil
}
if err := m.CiliumStatus.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cilium-status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cilium-status")
}
return err
}
}
return nil
}
func (m *DebugInfo) contextValidateEncryption(ctx context.Context, formats strfmt.Registry) error {
if m.Encryption != nil {
if swag.IsZero(m.Encryption) { // not required
return nil
}
if err := m.Encryption.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("encryption")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("encryption")
}
return err
}
}
return nil
}
func (m *DebugInfo) contextValidateEndpointList(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.EndpointList); i++ {
if m.EndpointList[i] != nil {
if swag.IsZero(m.EndpointList[i]) { // not required
continue
}
if err := m.EndpointList[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("endpoint-list" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("endpoint-list" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *DebugInfo) contextValidatePolicy(ctx context.Context, formats strfmt.Registry) error {
if m.Policy != nil {
if swag.IsZero(m.Policy) { // not required
return nil
}
if err := m.Policy.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("policy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("policy")
}
return err
}
}
return nil
}
func (m *DebugInfo) contextValidateServiceList(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.ServiceList); i++ {
if m.ServiceList[i] != nil {
if swag.IsZero(m.ServiceList[i]) { // not required
continue
}
if err := m.ServiceList[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("service-list" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("service-list" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *DebugInfo) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *DebugInfo) UnmarshalBinary(b []byte) error {
var res DebugInfo
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// DebugInfoEncryption debug info encryption
//
// swagger:model DebugInfoEncryption
type DebugInfoEncryption struct {
// Status of the WireGuard agent
Wireguard *WireguardStatus `json:"wireguard,omitempty"`
}
// Validate validates this debug info encryption
func (m *DebugInfoEncryption) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateWireguard(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DebugInfoEncryption) validateWireguard(formats strfmt.Registry) error {
if swag.IsZero(m.Wireguard) { // not required
return nil
}
if m.Wireguard != nil {
if err := m.Wireguard.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("encryption" + "." + "wireguard")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("encryption" + "." + "wireguard")
}
return err
}
}
return nil
}
// ContextValidate validate this debug info encryption based on the context it is used
func (m *DebugInfoEncryption) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateWireguard(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DebugInfoEncryption) contextValidateWireguard(ctx context.Context, formats strfmt.Registry) error {
if m.Wireguard != nil {
if swag.IsZero(m.Wireguard) { // not required
return nil
}
if err := m.Wireguard.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("encryption" + "." + "wireguard")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("encryption" + "." + "wireguard")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *DebugInfoEncryption) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *DebugInfoEncryption) UnmarshalBinary(b []byte) error {
var res DebugInfoEncryption
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
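// Example usage (hand-written sketch, NOT generated by go-swagger): DebugInfo
// aggregates agent state under kebab-case JSON keys. The snippet below builds a
// sparse instance with only simple string/map fields set (the version strings are
// purely illustrative), validates it, and prints its JSON form. The package and
// import block is illustrative only.
package models
import (
	"fmt"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
)
// exampleDebugInfo is a hypothetical helper, not part of the API.
func exampleDebugInfo() {
	info := &DebugInfo{
		CiliumVersion: "1.17.0",        // illustrative version string
		KernelVersion: "6.8.0-generic", // illustrative kernel string
		Subsystem: map[string]string{
			"ipam": "cluster-pool",
		},
	}
	// Nil pointers (cilium-status, encryption, policy) and empty slices are
	// simply skipped by the generated validators.
	if err := info.Validate(strfmt.Default); err != nil {
		fmt.Println("validation failed:", err)
		return
	}
	out, _ := swag.WriteJSON(info)
	fmt.Println(string(out)) // keys follow the json tags, e.g. "cilium-version"
}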
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// DNSLookup An IP -> DNS mapping, with metadata
//
// swagger:model DNSLookup
type DNSLookup struct {
// The endpoint that made this lookup, or 0 for the agent itself.
EndpointID int64 `json:"endpoint-id,omitempty"`
// The absolute time when this data will expire in this cache
// Format: date-time
ExpirationTime strfmt.DateTime `json:"expiration-time,omitempty"`
// DNS name
Fqdn string `json:"fqdn,omitempty"`
// IP addresses returned in this lookup
Ips []string `json:"ips"`
// The absolute time when this data was received
// Format: date-time
LookupTime strfmt.DateTime `json:"lookup-time,omitempty"`
// The reason this FQDN IP association exists. Either a DNS lookup or an ongoing connection to an IP that was created by a DNS lookup.
Source string `json:"source,omitempty"`
// The TTL in the DNS response
TTL int64 `json:"ttl,omitempty"`
}
// Validate validates this DNS lookup
func (m *DNSLookup) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateExpirationTime(formats); err != nil {
res = append(res, err)
}
if err := m.validateLookupTime(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *DNSLookup) validateExpirationTime(formats strfmt.Registry) error {
if swag.IsZero(m.ExpirationTime) { // not required
return nil
}
if err := validate.FormatOf("expiration-time", "body", "date-time", m.ExpirationTime.String(), formats); err != nil {
return err
}
return nil
}
func (m *DNSLookup) validateLookupTime(formats strfmt.Registry) error {
if swag.IsZero(m.LookupTime) { // not required
return nil
}
if err := validate.FormatOf("lookup-time", "body", "date-time", m.LookupTime.String(), formats); err != nil {
return err
}
return nil
}
// ContextValidate validates this DNS lookup based on context it is used
func (m *DNSLookup) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *DNSLookup) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *DNSLookup) UnmarshalBinary(b []byte) error {
var res DNSLookup
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
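// Example usage (hand-written sketch, NOT generated by go-swagger): the two
// date-time fields of DNSLookup are checked against the "date-time" format in the
// supplied registry, so populating them with strfmt.DateTime values keeps
// Validate happy. The package/import layout is illustrative only.
package models
import (
	"fmt"
	"time"
	"github.com/go-openapi/strfmt"
)
// exampleDNSLookup is a hypothetical helper, not part of the API.
func exampleDNSLookup() {
	now := time.Now().UTC()
	lookup := &DNSLookup{
		EndpointID:     42, // illustrative endpoint ID
		Fqdn:           "example.com.",
		Ips:            []string{"93.184.216.34"},
		LookupTime:     strfmt.DateTime(now),
		ExpirationTime: strfmt.DateTime(now.Add(30 * time.Second)),
		TTL:            30,
		Source:         "lookup",
	}
	if err := lookup.Validate(strfmt.Default); err != nil {
		fmt.Println("validation failed:", err)
		return
	}
	fmt.Println("lookup entry valid until", lookup.ExpirationTime)
}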
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// EncryptionStatus Status of transparent encryption
//
// +k8s:deepcopy-gen=true
//
// swagger:model EncryptionStatus
type EncryptionStatus struct {
// Status of the IPsec agent
Ipsec *IPsecStatus `json:"ipsec,omitempty"`
// mode
// Enum: ["Disabled","IPsec","Wireguard"]
Mode string `json:"mode,omitempty"`
// Human readable error/warning message
Msg string `json:"msg,omitempty"`
// Status of the WireGuard agent
Wireguard *WireguardStatus `json:"wireguard,omitempty"`
}
// Validate validates this encryption status
func (m *EncryptionStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateIpsec(formats); err != nil {
res = append(res, err)
}
if err := m.validateMode(formats); err != nil {
res = append(res, err)
}
if err := m.validateWireguard(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EncryptionStatus) validateIpsec(formats strfmt.Registry) error {
if swag.IsZero(m.Ipsec) { // not required
return nil
}
if m.Ipsec != nil {
if err := m.Ipsec.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipsec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipsec")
}
return err
}
}
return nil
}
var encryptionStatusTypeModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Disabled","IPsec","Wireguard"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
encryptionStatusTypeModePropEnum = append(encryptionStatusTypeModePropEnum, v)
}
}
const (
// EncryptionStatusModeDisabled captures enum value "Disabled"
EncryptionStatusModeDisabled string = "Disabled"
// EncryptionStatusModeIPsec captures enum value "IPsec"
EncryptionStatusModeIPsec string = "IPsec"
// EncryptionStatusModeWireguard captures enum value "Wireguard"
EncryptionStatusModeWireguard string = "Wireguard"
)
// prop value enum
func (m *EncryptionStatus) validateModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, encryptionStatusTypeModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *EncryptionStatus) validateMode(formats strfmt.Registry) error {
if swag.IsZero(m.Mode) { // not required
return nil
}
// value enum
if err := m.validateModeEnum("mode", "body", m.Mode); err != nil {
return err
}
return nil
}
func (m *EncryptionStatus) validateWireguard(formats strfmt.Registry) error {
if swag.IsZero(m.Wireguard) { // not required
return nil
}
if m.Wireguard != nil {
if err := m.Wireguard.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("wireguard")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("wireguard")
}
return err
}
}
return nil
}
// ContextValidate validate this encryption status based on the context it is used
func (m *EncryptionStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateIpsec(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateWireguard(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EncryptionStatus) contextValidateIpsec(ctx context.Context, formats strfmt.Registry) error {
if m.Ipsec != nil {
if swag.IsZero(m.Ipsec) { // not required
return nil
}
if err := m.Ipsec.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipsec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipsec")
}
return err
}
}
return nil
}
func (m *EncryptionStatus) contextValidateWireguard(ctx context.Context, formats strfmt.Registry) error {
if m.Wireguard != nil {
if swag.IsZero(m.Wireguard) { // not required
return nil
}
if err := m.Wireguard.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("wireguard")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("wireguard")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *EncryptionStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EncryptionStatus) UnmarshalBinary(b []byte) error {
var res EncryptionStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
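// Example usage (hand-written sketch, NOT generated by go-swagger): the Mode field
// of EncryptionStatus is a plain string restricted to the three generated
// constants; anything else is rejected by the enum validator. The package/import
// layout is illustrative only.
package models
import (
	"fmt"
	"github.com/go-openapi/strfmt"
)
// exampleEncryptionStatusMode is a hypothetical helper, not part of the API.
func exampleEncryptionStatusMode() {
	ok := &EncryptionStatus{Mode: EncryptionStatusModeWireguard}
	if err := ok.Validate(strfmt.Default); err != nil {
		fmt.Println("unexpected error:", err)
	}
	// "wireguard" (lower case) is not one of "Disabled", "IPsec", "Wireguard",
	// so EnumCase reports a validation error for the "mode" field.
	bad := &EncryptionStatus{Mode: "wireguard"}
	if err := bad.Validate(strfmt.Default); err != nil {
		fmt.Println("rejected:", err)
	}
}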
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Endpoint An endpoint is a namespaced network interface to which cilium applies policies
//
// swagger:model Endpoint
type Endpoint struct {
// The cilium-agent-local ID of the endpoint
ID int64 `json:"id,omitempty"`
// The desired configuration state of the endpoint
Spec *EndpointConfigurationSpec `json:"spec,omitempty"`
// The desired and realized configuration state of the endpoint
Status *EndpointStatus `json:"status,omitempty"`
}
// Validate validates this endpoint
func (m *Endpoint) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateSpec(formats); err != nil {
res = append(res, err)
}
if err := m.validateStatus(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Endpoint) validateSpec(formats strfmt.Registry) error {
if swag.IsZero(m.Spec) { // not required
return nil
}
if m.Spec != nil {
if err := m.Spec.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *Endpoint) validateStatus(formats strfmt.Registry) error {
if swag.IsZero(m.Status) { // not required
return nil
}
if m.Status != nil {
if err := m.Status.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// ContextValidate validate this endpoint based on the context it is used
func (m *Endpoint) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateSpec(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateStatus(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Endpoint) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error {
if m.Spec != nil {
if swag.IsZero(m.Spec) { // not required
return nil
}
if err := m.Spec.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *Endpoint) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
if m.Status != nil {
if swag.IsZero(m.Status) { // not required
return nil
}
if err := m.Status.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *Endpoint) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Endpoint) UnmarshalBinary(b []byte) error {
var res Endpoint
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
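// Example usage (hand-written sketch, NOT generated by go-swagger): every model in
// this package exposes the same MarshalBinary/UnmarshalBinary pair backed by
// swag.WriteJSON and swag.ReadJSON, so a populated Endpoint can be round-tripped
// through its binary (JSON) form. The package/import layout is illustrative only.
package models
import (
	"fmt"
)
// exampleEndpointRoundTrip is a hypothetical helper, not part of the API.
func exampleEndpointRoundTrip() {
	orig := &Endpoint{ID: 2048}
	raw, err := orig.MarshalBinary()
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}
	var copyEp Endpoint
	if err := copyEp.UnmarshalBinary(raw); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Println("round-tripped endpoint ID:", copyEp.ID)
}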
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointBatchDeleteRequest Properties selecting a batch of endpoints to delete.
//
// swagger:model EndpointBatchDeleteRequest
type EndpointBatchDeleteRequest struct {
// ID assigned by container runtime
ContainerID string `json:"container-id,omitempty"`
}
// Validate validates this endpoint batch delete request
func (m *EndpointBatchDeleteRequest) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this endpoint batch delete request based on context it is used
func (m *EndpointBatchDeleteRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *EndpointBatchDeleteRequest) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointBatchDeleteRequest) UnmarshalBinary(b []byte) error {
var res EndpointBatchDeleteRequest
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// EndpointChangeRequest Structure which contains the mutable elements of an Endpoint.
//
// swagger:model EndpointChangeRequest
type EndpointChangeRequest struct {
// addressing
Addressing *AddressPair `json:"addressing,omitempty"`
// ID assigned by container runtime
ContainerID string `json:"container-id,omitempty"`
// Name of network device in container netns
ContainerInterfaceName string `json:"container-interface-name,omitempty"`
// Name assigned to container
ContainerName string `json:"container-name,omitempty"`
// datapath configuration
DatapathConfiguration *EndpointDatapathConfiguration `json:"datapath-configuration,omitempty"`
// ID of datapath tail call map
DatapathMapID int64 `json:"datapath-map-id,omitempty"`
// Disables lookup using legacy endpoint identifiers (container name, container id, pod name) for this endpoint
DisableLegacyIdentifiers bool `json:"disable-legacy-identifiers,omitempty"`
// Docker endpoint ID
DockerEndpointID string `json:"docker-endpoint-id,omitempty"`
// Docker network ID
DockerNetworkID string `json:"docker-network-id,omitempty"`
// MAC address
HostMac string `json:"host-mac,omitempty"`
// Local endpoint ID
ID int64 `json:"id,omitempty"`
// Index of network device in host netns
InterfaceIndex int64 `json:"interface-index,omitempty"`
// Name of network device in host netns
InterfaceName string `json:"interface-name,omitempty"`
// Kubernetes namespace name
K8sNamespace string `json:"k8s-namespace,omitempty"`
// Kubernetes pod name
K8sPodName string `json:"k8s-pod-name,omitempty"`
// Kubernetes pod UID
K8sUID string `json:"k8s-uid,omitempty"`
// Labels describing the identity
Labels Labels `json:"labels,omitempty"`
// MAC address
Mac string `json:"mac,omitempty"`
// Network namespace cookie
NetnsCookie string `json:"netns-cookie,omitempty"`
// Index of network device from which an IP was used as endpoint IP. Only relevant for ENI environments.
ParentInterfaceIndex int64 `json:"parent-interface-index,omitempty"`
// Process ID of the workload belonging to this endpoint
Pid int64 `json:"pid,omitempty"`
// Whether policy enforcement is enabled or not
PolicyEnabled bool `json:"policy-enabled,omitempty"`
// Properties is used to store information about the endpoint at creation. Useful for tests.
Properties map[string]interface{} `json:"properties,omitempty"`
// Current state of endpoint
// Required: true
State *EndpointState `json:"state"`
// Whether to build an endpoint synchronously
//
SyncBuildEndpoint bool `json:"sync-build-endpoint,omitempty"`
}
// Validate validates this endpoint change request
func (m *EndpointChangeRequest) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateAddressing(formats); err != nil {
res = append(res, err)
}
if err := m.validateDatapathConfiguration(formats); err != nil {
res = append(res, err)
}
if err := m.validateLabels(formats); err != nil {
res = append(res, err)
}
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointChangeRequest) validateAddressing(formats strfmt.Registry) error {
if swag.IsZero(m.Addressing) { // not required
return nil
}
if m.Addressing != nil {
if err := m.Addressing.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("addressing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("addressing")
}
return err
}
}
return nil
}
func (m *EndpointChangeRequest) validateDatapathConfiguration(formats strfmt.Registry) error {
if swag.IsZero(m.DatapathConfiguration) { // not required
return nil
}
if m.DatapathConfiguration != nil {
if err := m.DatapathConfiguration.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("datapath-configuration")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("datapath-configuration")
}
return err
}
}
return nil
}
func (m *EndpointChangeRequest) validateLabels(formats strfmt.Registry) error {
if swag.IsZero(m.Labels) { // not required
return nil
}
if err := m.Labels.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
return nil
}
func (m *EndpointChangeRequest) validateState(formats strfmt.Registry) error {
if err := validate.Required("state", "body", m.State); err != nil {
return err
}
if m.State != nil {
if err := m.State.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("state")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("state")
}
return err
}
}
return nil
}
// ContextValidate validate this endpoint change request based on the context it is used
func (m *EndpointChangeRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateAddressing(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateDatapathConfiguration(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateLabels(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateState(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointChangeRequest) contextValidateAddressing(ctx context.Context, formats strfmt.Registry) error {
if m.Addressing != nil {
if swag.IsZero(m.Addressing) { // not required
return nil
}
if err := m.Addressing.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("addressing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("addressing")
}
return err
}
}
return nil
}
func (m *EndpointChangeRequest) contextValidateDatapathConfiguration(ctx context.Context, formats strfmt.Registry) error {
if m.DatapathConfiguration != nil {
if swag.IsZero(m.DatapathConfiguration) { // not required
return nil
}
if err := m.DatapathConfiguration.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("datapath-configuration")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("datapath-configuration")
}
return err
}
}
return nil
}
func (m *EndpointChangeRequest) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error {
if err := m.Labels.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
return nil
}
func (m *EndpointChangeRequest) contextValidateState(ctx context.Context, formats strfmt.Registry) error {
if m.State != nil {
if err := m.State.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("state")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("state")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *EndpointChangeRequest) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointChangeRequest) UnmarshalBinary(b []byte) error {
var res EndpointChangeRequest
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
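// Example usage (hand-written sketch, NOT generated by go-swagger): State is the
// only required field of EndpointChangeRequest, so a request without it fails the
// validate.Required check, while a request that carries a State pointer passes it
// (the EndpointState value itself is then validated by its own enum logic, defined
// elsewhere in this package; "ready" is assumed here to be one of those enum
// values). The package/import layout is illustrative only.
package models
import (
	"fmt"
	"github.com/go-openapi/strfmt"
)
// exampleEndpointChangeRequestState is a hypothetical helper, not part of the API.
func exampleEndpointChangeRequestState() {
	// Missing state: validate.Required rejects the request.
	incomplete := &EndpointChangeRequest{ContainerID: "abc123"}
	if err := incomplete.Validate(strfmt.Default); err != nil {
		fmt.Println("rejected:", err)
	}
	// With a state pointer set, the Required check is satisfied.
	state := EndpointState("ready") // assumed enum value, for illustration only
	complete := &EndpointChangeRequest{
		ContainerID: "abc123",
		State:       &state,
	}
	if err := complete.Validate(strfmt.Default); err != nil {
		fmt.Println("validation failed:", err)
	}
}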
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointConfigurationSpec An endpoint's configuration
//
// swagger:model EndpointConfigurationSpec
type EndpointConfigurationSpec struct {
// the endpoint's labels
LabelConfiguration *LabelConfigurationSpec `json:"label-configuration,omitempty"`
// Changeable configuration
Options ConfigurationMap `json:"options,omitempty"`
}
// Validate validates this endpoint configuration spec
func (m *EndpointConfigurationSpec) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateLabelConfiguration(formats); err != nil {
res = append(res, err)
}
if err := m.validateOptions(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointConfigurationSpec) validateLabelConfiguration(formats strfmt.Registry) error {
if swag.IsZero(m.LabelConfiguration) { // not required
return nil
}
if m.LabelConfiguration != nil {
if err := m.LabelConfiguration.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("label-configuration")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("label-configuration")
}
return err
}
}
return nil
}
func (m *EndpointConfigurationSpec) validateOptions(formats strfmt.Registry) error {
if swag.IsZero(m.Options) { // not required
return nil
}
if m.Options != nil {
if err := m.Options.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("options")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("options")
}
return err
}
}
return nil
}
// ContextValidate validate this endpoint configuration spec based on the context it is used
func (m *EndpointConfigurationSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateLabelConfiguration(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateOptions(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointConfigurationSpec) contextValidateLabelConfiguration(ctx context.Context, formats strfmt.Registry) error {
if m.LabelConfiguration != nil {
if swag.IsZero(m.LabelConfiguration) { // not required
return nil
}
if err := m.LabelConfiguration.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("label-configuration")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("label-configuration")
}
return err
}
}
return nil
}
func (m *EndpointConfigurationSpec) contextValidateOptions(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.Options) { // not required
return nil
}
if err := m.Options.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("options")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("options")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *EndpointConfigurationSpec) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointConfigurationSpec) UnmarshalBinary(b []byte) error {
var res EndpointConfigurationSpec
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointConfigurationStatus An endpoint's configuration
//
// swagger:model EndpointConfigurationStatus
type EndpointConfigurationStatus struct {
// Most recent error, if applicable
Error Error `json:"error,omitempty"`
// Immutable configuration (read-only)
Immutable ConfigurationMap `json:"immutable,omitempty"`
// currently applied changeable configuration
Realized *EndpointConfigurationSpec `json:"realized,omitempty"`
}
// Validate validates this endpoint configuration status
func (m *EndpointConfigurationStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateError(formats); err != nil {
res = append(res, err)
}
if err := m.validateImmutable(formats); err != nil {
res = append(res, err)
}
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointConfigurationStatus) validateError(formats strfmt.Registry) error {
if swag.IsZero(m.Error) { // not required
return nil
}
if err := m.Error.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("error")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("error")
}
return err
}
return nil
}
func (m *EndpointConfigurationStatus) validateImmutable(formats strfmt.Registry) error {
if swag.IsZero(m.Immutable) { // not required
return nil
}
if m.Immutable != nil {
if err := m.Immutable.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("immutable")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("immutable")
}
return err
}
}
return nil
}
func (m *EndpointConfigurationStatus) validateRealized(formats strfmt.Registry) error {
if swag.IsZero(m.Realized) { // not required
return nil
}
if m.Realized != nil {
if err := m.Realized.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// ContextValidate validate this endpoint configuration status based on the context it is used
func (m *EndpointConfigurationStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateError(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateImmutable(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateRealized(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointConfigurationStatus) contextValidateError(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.Error) { // not required
return nil
}
if err := m.Error.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("error")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("error")
}
return err
}
return nil
}
func (m *EndpointConfigurationStatus) contextValidateImmutable(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.Immutable) { // not required
return nil
}
if err := m.Immutable.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("immutable")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("immutable")
}
return err
}
return nil
}
func (m *EndpointConfigurationStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *EndpointConfigurationStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointConfigurationStatus) UnmarshalBinary(b []byte) error {
var res EndpointConfigurationStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointDatapathConfiguration Datapath configuration to be used for the endpoint
//
// swagger:model EndpointDatapathConfiguration
type EndpointDatapathConfiguration struct {
// Disable source IP verification for the endpoint.
//
DisableSipVerification bool `json:"disable-sip-verification,omitempty"`
// Indicates that IPAM is done external to Cilium. This prevents the IP from being released, and re-allocation of the IP address is skipped on restore.
//
ExternalIpam bool `json:"external-ipam,omitempty"`
// Installs a route in the Linux routing table pointing to the device of the endpoint's interface.
//
InstallEndpointRoute bool `json:"install-endpoint-route,omitempty"`
// Enable ARP passthrough mode
RequireArpPassthrough bool `json:"require-arp-passthrough,omitempty"`
// Endpoint requires a host-facing egress program to be attached to implement ingress policy and reverse NAT.
//
RequireEgressProg bool `json:"require-egress-prog,omitempty"`
// Endpoint requires BPF routing to be enabled; when disabled, routing is delegated to Linux routing.
//
RequireRouting *bool `json:"require-routing,omitempty"`
}
// Validate validates this endpoint datapath configuration
func (m *EndpointDatapathConfiguration) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this endpoint datapath configuration based on context it is used
func (m *EndpointDatapathConfiguration) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *EndpointDatapathConfiguration) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointDatapathConfiguration) UnmarshalBinary(b []byte) error {
var res EndpointDatapathConfiguration
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
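// Example usage (hand-written sketch, NOT generated by go-swagger): RequireRouting
// is a *bool so that "unset", "explicitly true" and "explicitly false" can be
// distinguished, unlike the plain bool fields, which collapse false and unset.
// swag.Bool is the usual go-openapi helper for taking the address of a literal.
// The package/import layout is illustrative only.
package models
import (
	"fmt"
	"github.com/go-openapi/swag"
)
// exampleEndpointDatapathConfiguration is a hypothetical helper, not part of the API.
func exampleEndpointDatapathConfiguration() {
	cfg := &EndpointDatapathConfiguration{
		ExternalIpam:   true,
		RequireRouting: swag.Bool(false), // explicitly delegate routing to Linux
	}
	if cfg.RequireRouting == nil {
		fmt.Println("routing preference not set")
	} else {
		fmt.Println("require routing:", *cfg.RequireRouting)
	}
	out, _ := swag.WriteJSON(cfg)
	fmt.Println(string(out))
}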
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointHealth Health of the endpoint
//
// +deepequal-gen=true
//
// swagger:model EndpointHealth
type EndpointHealth struct {
// bpf
Bpf EndpointHealthStatus `json:"bpf,omitempty"`
// Is this endpoint reachable
Connected bool `json:"connected,omitempty"`
// overall health
OverallHealth EndpointHealthStatus `json:"overallHealth,omitempty"`
// policy
Policy EndpointHealthStatus `json:"policy,omitempty"`
}
// Validate validates this endpoint health
func (m *EndpointHealth) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateBpf(formats); err != nil {
res = append(res, err)
}
if err := m.validateOverallHealth(formats); err != nil {
res = append(res, err)
}
if err := m.validatePolicy(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointHealth) validateBpf(formats strfmt.Registry) error {
if swag.IsZero(m.Bpf) { // not required
return nil
}
if err := m.Bpf.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("bpf")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("bpf")
}
return err
}
return nil
}
func (m *EndpointHealth) validateOverallHealth(formats strfmt.Registry) error {
if swag.IsZero(m.OverallHealth) { // not required
return nil
}
if err := m.OverallHealth.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("overallHealth")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("overallHealth")
}
return err
}
return nil
}
func (m *EndpointHealth) validatePolicy(formats strfmt.Registry) error {
if swag.IsZero(m.Policy) { // not required
return nil
}
if err := m.Policy.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("policy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("policy")
}
return err
}
return nil
}
// ContextValidate validate this endpoint health based on the context it is used
func (m *EndpointHealth) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateBpf(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateOverallHealth(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidatePolicy(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointHealth) contextValidateBpf(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.Bpf) { // not required
return nil
}
if err := m.Bpf.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("bpf")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("bpf")
}
return err
}
return nil
}
func (m *EndpointHealth) contextValidateOverallHealth(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.OverallHealth) { // not required
return nil
}
if err := m.OverallHealth.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("overallHealth")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("overallHealth")
}
return err
}
return nil
}
func (m *EndpointHealth) contextValidatePolicy(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.Policy) { // not required
return nil
}
if err := m.Policy.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("policy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("policy")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *EndpointHealth) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointHealth) UnmarshalBinary(b []byte) error {
var res EndpointHealth
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/validate"
)
// EndpointHealthStatus A common set of statuses for endpoint health * ``OK`` = All components operational * ``Bootstrap`` = This component is being created * ``Pending`` = A change is being processed to be applied * ``Warning`` = This component is not applying up-to-date policies (but is still applying the previous version) * ``Failure`` = An error has occurred and no policy is being applied * ``Disabled`` = This endpoint is disabled and will not handle traffic
//
// swagger:model EndpointHealthStatus
type EndpointHealthStatus string
func NewEndpointHealthStatus(value EndpointHealthStatus) *EndpointHealthStatus {
return &value
}
// Pointer returns a pointer to a freshly-allocated EndpointHealthStatus.
func (m EndpointHealthStatus) Pointer() *EndpointHealthStatus {
return &m
}
const (
// EndpointHealthStatusOK captures enum value "OK"
EndpointHealthStatusOK EndpointHealthStatus = "OK"
// EndpointHealthStatusBootstrap captures enum value "Bootstrap"
EndpointHealthStatusBootstrap EndpointHealthStatus = "Bootstrap"
// EndpointHealthStatusPending captures enum value "Pending"
EndpointHealthStatusPending EndpointHealthStatus = "Pending"
// EndpointHealthStatusWarning captures enum value "Warning"
EndpointHealthStatusWarning EndpointHealthStatus = "Warning"
// EndpointHealthStatusFailure captures enum value "Failure"
EndpointHealthStatusFailure EndpointHealthStatus = "Failure"
// EndpointHealthStatusDisabled captures enum value "Disabled"
EndpointHealthStatusDisabled EndpointHealthStatus = "Disabled"
)
// for schema
var endpointHealthStatusEnum []interface{}
func init() {
var res []EndpointHealthStatus
if err := json.Unmarshal([]byte(`["OK","Bootstrap","Pending","Warning","Failure","Disabled"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
endpointHealthStatusEnum = append(endpointHealthStatusEnum, v)
}
}
func (m EndpointHealthStatus) validateEndpointHealthStatusEnum(path, location string, value EndpointHealthStatus) error {
if err := validate.EnumCase(path, location, value, endpointHealthStatusEnum, true); err != nil {
return err
}
return nil
}
// Validate validates this endpoint health status
func (m EndpointHealthStatus) Validate(formats strfmt.Registry) error {
var res []error
// value enum
if err := m.validateEndpointHealthStatusEnum("", "body", m); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validates this endpoint health status based on context it is used
func (m EndpointHealthStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
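// Hand-written usage sketch (not generated by go-swagger): illustrates the
// EndpointHealthStatus constants and the enum validation above. It assumes it is
// compiled alongside the generated models in this package; the function name is
// hypothetical.
package models
import (
	"fmt"
	"github.com/go-openapi/strfmt"
)
// exampleEndpointHealthStatus validates one in-range and one out-of-range value.
func exampleEndpointHealthStatus() {
	ok := EndpointHealthStatusOK
	fmt.Println(ok.Validate(strfmt.Default)) // <nil>: "OK" is in endpointHealthStatusEnum
	bogus := EndpointHealthStatus("Degraded") // not one of the generated enum values
	fmt.Println(bogus.Validate(strfmt.Default)) // enum validation error listing the allowed values
}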
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointIdentifiers Unique identifiers for this endpoint from outside cilium
//
// +deepequal-gen=true
//
// swagger:model EndpointIdentifiers
type EndpointIdentifiers struct {
// ID assigned to this attachment by container runtime
CniAttachmentID string `json:"cni-attachment-id,omitempty"`
// ID assigned by container runtime (deprecated, may not be unique)
ContainerID string `json:"container-id,omitempty"`
// Name assigned to container (deprecated, may not be unique)
ContainerName string `json:"container-name,omitempty"`
// Docker endpoint ID
DockerEndpointID string `json:"docker-endpoint-id,omitempty"`
// Docker network ID
DockerNetworkID string `json:"docker-network-id,omitempty"`
// K8s namespace for this endpoint (deprecated, may not be unique)
K8sNamespace string `json:"k8s-namespace,omitempty"`
// K8s pod name for this endpoint (deprecated, may not be unique)
K8sPodName string `json:"k8s-pod-name,omitempty"`
// K8s pod for this endpoint (deprecated, may not be unique)
PodName string `json:"pod-name,omitempty"`
}
// Validate validates this endpoint identifiers
func (m *EndpointIdentifiers) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this endpoint identifiers based on context it is used
func (m *EndpointIdentifiers) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *EndpointIdentifiers) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointIdentifiers) UnmarshalBinary(b []byte) error {
var res EndpointIdentifiers
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
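// Hand-written usage sketch (not generated by go-swagger): shows how
// EndpointIdentifiers round-trips through MarshalBinary/UnmarshalBinary and how
// the json tags shape the wire format. The identifier values are made up.
package models
import (
	"fmt"
)
func exampleEndpointIdentifiersRoundTrip() {
	in := &EndpointIdentifiers{
		CniAttachmentID: "cni-123:eth0", // preferred unique identifier
		K8sNamespace:    "default",      // deprecated, kept for compatibility
		K8sPodName:      "nginx-7d9c",   // deprecated, kept for compatibility
	}
	raw, err := in.MarshalBinary() // swag.WriteJSON under the hood
	if err != nil {
		panic(err)
	}
	// Only non-empty fields appear, keyed by the json tags, e.g.
	// {"cni-attachment-id":"cni-123:eth0","k8s-namespace":"default","k8s-pod-name":"nginx-7d9c"}
	fmt.Println(string(raw))
	var out EndpointIdentifiers
	if err := out.UnmarshalBinary(raw); err != nil {
		panic(err)
	}
	fmt.Println(out.K8sPodName) // nginx-7d9c
}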
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointNetworking Networking properties of the endpoint
//
// swagger:model EndpointNetworking
type EndpointNetworking struct {
// IPv4/IPv6 addresses assigned to this Endpoint
Addressing []*AddressPair `json:"addressing"`
// Name of network device in container netns
ContainerInterfaceName string `json:"container-interface-name,omitempty"`
// host addressing
HostAddressing *NodeAddressing `json:"host-addressing,omitempty"`
// MAC address of the host-side interface (veth peer)
HostMac string `json:"host-mac,omitempty"`
// Index of network device in host netns
InterfaceIndex int64 `json:"interface-index,omitempty"`
// Name of network device in host netns
InterfaceName string `json:"interface-name,omitempty"`
// MAC address of the endpoint's interface in the container netns
Mac string `json:"mac,omitempty"`
}
// Validate validates this endpoint networking
func (m *EndpointNetworking) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateAddressing(formats); err != nil {
res = append(res, err)
}
if err := m.validateHostAddressing(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointNetworking) validateAddressing(formats strfmt.Registry) error {
if swag.IsZero(m.Addressing) { // not required
return nil
}
for i := 0; i < len(m.Addressing); i++ {
if swag.IsZero(m.Addressing[i]) { // not required
continue
}
if m.Addressing[i] != nil {
if err := m.Addressing[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("addressing" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("addressing" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *EndpointNetworking) validateHostAddressing(formats strfmt.Registry) error {
if swag.IsZero(m.HostAddressing) { // not required
return nil
}
if m.HostAddressing != nil {
if err := m.HostAddressing.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("host-addressing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("host-addressing")
}
return err
}
}
return nil
}
// ContextValidate validate this endpoint networking based on the context it is used
func (m *EndpointNetworking) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateAddressing(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateHostAddressing(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointNetworking) contextValidateAddressing(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Addressing); i++ {
if m.Addressing[i] != nil {
if swag.IsZero(m.Addressing[i]) { // not required
return nil
}
if err := m.Addressing[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("addressing" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("addressing" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *EndpointNetworking) contextValidateHostAddressing(ctx context.Context, formats strfmt.Registry) error {
if m.HostAddressing != nil {
if swag.IsZero(m.HostAddressing) { // not required
return nil
}
if err := m.HostAddressing.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("host-addressing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("host-addressing")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *EndpointNetworking) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointNetworking) UnmarshalBinary(b []byte) error {
var res EndpointNetworking
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
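// Hand-written usage sketch (not generated by go-swagger): populates an
// EndpointNetworking value with a single AddressPair and runs the generated
// validators. Addresses and interface names are illustrative only.
package models
import (
	"context"
	"fmt"
	"github.com/go-openapi/strfmt"
)
func exampleEndpointNetworkingValidate() {
	n := &EndpointNetworking{
		Addressing: []*AddressPair{
			{IPV4: "10.0.1.23", IPV6: "f00d::a10:0:0:1"},
		},
		InterfaceName:          "lxc12345",
		ContainerInterfaceName: "eth0",
		Mac:                    "aa:bb:cc:dd:ee:01",
	}
	// Validate and ContextValidate both recurse into Addressing and HostAddressing.
	if err := n.Validate(strfmt.Default); err != nil {
		fmt.Println("validate:", err)
	}
	if err := n.ContextValidate(context.Background(), strfmt.Default); err != nil {
		fmt.Println("context validate:", err)
	}
}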
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointPolicy Policy information of an endpoint
//
// swagger:model EndpointPolicy
type EndpointPolicy struct {
// List of identities to which this endpoint is allowed to communicate
//
AllowedEgressIdentities []int64 `json:"allowed-egress-identities"`
// List of identities allowed to communicate to this endpoint
//
AllowedIngressIdentities []int64 `json:"allowed-ingress-identities"`
// Build number of calculated policy in use
Build int64 `json:"build,omitempty"`
// cidr policy
CidrPolicy *CIDRPolicy `json:"cidr-policy,omitempty"`
// List of identities to which this endpoint is not allowed to communicate
//
DeniedEgressIdentities []int64 `json:"denied-egress-identities"`
// List of identities not allowed to communicate to this endpoint
//
DeniedIngressIdentities []int64 `json:"denied-ingress-identities"`
// Own identity of endpoint
ID int64 `json:"id,omitempty"`
// l4
L4 *L4Policy `json:"l4,omitempty"`
// Whether policy enforcement is enabled (none, ingress, egress, both, or one of the audit modes)
PolicyEnabled EndpointPolicyEnabled `json:"policy-enabled,omitempty"`
// The agent-local policy revision
PolicyRevision int64 `json:"policy-revision,omitempty"`
}
// Validate validates this endpoint policy
func (m *EndpointPolicy) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateCidrPolicy(formats); err != nil {
res = append(res, err)
}
if err := m.validateL4(formats); err != nil {
res = append(res, err)
}
if err := m.validatePolicyEnabled(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointPolicy) validateCidrPolicy(formats strfmt.Registry) error {
if swag.IsZero(m.CidrPolicy) { // not required
return nil
}
if m.CidrPolicy != nil {
if err := m.CidrPolicy.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cidr-policy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cidr-policy")
}
return err
}
}
return nil
}
func (m *EndpointPolicy) validateL4(formats strfmt.Registry) error {
if swag.IsZero(m.L4) { // not required
return nil
}
if m.L4 != nil {
if err := m.L4.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("l4")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("l4")
}
return err
}
}
return nil
}
func (m *EndpointPolicy) validatePolicyEnabled(formats strfmt.Registry) error {
if swag.IsZero(m.PolicyEnabled) { // not required
return nil
}
if err := m.PolicyEnabled.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("policy-enabled")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("policy-enabled")
}
return err
}
return nil
}
// ContextValidate validate this endpoint policy based on the context it is used
func (m *EndpointPolicy) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateCidrPolicy(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateL4(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidatePolicyEnabled(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointPolicy) contextValidateCidrPolicy(ctx context.Context, formats strfmt.Registry) error {
if m.CidrPolicy != nil {
if swag.IsZero(m.CidrPolicy) { // not required
return nil
}
if err := m.CidrPolicy.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cidr-policy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cidr-policy")
}
return err
}
}
return nil
}
func (m *EndpointPolicy) contextValidateL4(ctx context.Context, formats strfmt.Registry) error {
if m.L4 != nil {
if swag.IsZero(m.L4) { // not required
return nil
}
if err := m.L4.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("l4")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("l4")
}
return err
}
}
return nil
}
func (m *EndpointPolicy) contextValidatePolicyEnabled(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.PolicyEnabled) { // not required
return nil
}
if err := m.PolicyEnabled.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("policy-enabled")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("policy-enabled")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *EndpointPolicy) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointPolicy) UnmarshalBinary(b []byte) error {
var res EndpointPolicy
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/validate"
)
// EndpointPolicyEnabled Whether policy enforcement is enabled (none, ingress, egress, both, or one of the audit modes)
//
// swagger:model EndpointPolicyEnabled
type EndpointPolicyEnabled string
func NewEndpointPolicyEnabled(value EndpointPolicyEnabled) *EndpointPolicyEnabled {
return &value
}
// Pointer returns a pointer to a freshly-allocated EndpointPolicyEnabled.
func (m EndpointPolicyEnabled) Pointer() *EndpointPolicyEnabled {
return &m
}
const (
// EndpointPolicyEnabledNone captures enum value "none"
EndpointPolicyEnabledNone EndpointPolicyEnabled = "none"
// EndpointPolicyEnabledIngress captures enum value "ingress"
EndpointPolicyEnabledIngress EndpointPolicyEnabled = "ingress"
// EndpointPolicyEnabledEgress captures enum value "egress"
EndpointPolicyEnabledEgress EndpointPolicyEnabled = "egress"
// EndpointPolicyEnabledBoth captures enum value "both"
EndpointPolicyEnabledBoth EndpointPolicyEnabled = "both"
// EndpointPolicyEnabledAuditDashIngress captures enum value "audit-ingress"
EndpointPolicyEnabledAuditDashIngress EndpointPolicyEnabled = "audit-ingress"
// EndpointPolicyEnabledAuditDashEgress captures enum value "audit-egress"
EndpointPolicyEnabledAuditDashEgress EndpointPolicyEnabled = "audit-egress"
// EndpointPolicyEnabledAuditDashBoth captures enum value "audit-both"
EndpointPolicyEnabledAuditDashBoth EndpointPolicyEnabled = "audit-both"
)
// for schema
var endpointPolicyEnabledEnum []interface{}
func init() {
var res []EndpointPolicyEnabled
if err := json.Unmarshal([]byte(`["none","ingress","egress","both","audit-ingress","audit-egress","audit-both"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
endpointPolicyEnabledEnum = append(endpointPolicyEnabledEnum, v)
}
}
func (m EndpointPolicyEnabled) validateEndpointPolicyEnabledEnum(path, location string, value EndpointPolicyEnabled) error {
if err := validate.EnumCase(path, location, value, endpointPolicyEnabledEnum, true); err != nil {
return err
}
return nil
}
// Validate validates this endpoint policy enabled
func (m EndpointPolicyEnabled) Validate(formats strfmt.Registry) error {
var res []error
// value enum
if err := m.validateEndpointPolicyEnabledEnum("", "body", m); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validates this endpoint policy enabled based on context it is used
func (m EndpointPolicyEnabled) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
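// Hand-written usage sketch (not generated by go-swagger): shows how an invalid
// policy-enabled value surfaces through EndpointPolicy.Validate as a nested
// error on the "policy-enabled" field.
package models
import (
	"fmt"
	"github.com/go-openapi/strfmt"
)
func exampleEndpointPolicyEnabled() {
	p := &EndpointPolicy{PolicyEnabled: EndpointPolicyEnabledAuditDashBoth}
	fmt.Println(p.Validate(strfmt.Default)) // <nil>: "audit-both" is a valid enum value
	p.PolicyEnabled = EndpointPolicyEnabled("enforcing") // not in the enum
	fmt.Println(p.Validate(strfmt.Default))              // validation error naming "policy-enabled"
}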
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointPolicyStatus Policy information of an endpoint
//
// swagger:model EndpointPolicyStatus
type EndpointPolicyStatus struct {
// The policy revision currently enforced in the proxy for this endpoint
ProxyPolicyRevision int64 `json:"proxy-policy-revision,omitempty"`
// Statistics of the proxy redirects configured for this endpoint
ProxyStatistics []*ProxyStatistics `json:"proxy-statistics"`
// The policy in the datapath for this endpoint
Realized *EndpointPolicy `json:"realized,omitempty"`
// The policy that should apply to this endpoint
Spec *EndpointPolicy `json:"spec,omitempty"`
}
// Validate validates this endpoint policy status
func (m *EndpointPolicyStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateProxyStatistics(formats); err != nil {
res = append(res, err)
}
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if err := m.validateSpec(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointPolicyStatus) validateProxyStatistics(formats strfmt.Registry) error {
if swag.IsZero(m.ProxyStatistics) { // not required
return nil
}
for i := 0; i < len(m.ProxyStatistics); i++ {
if swag.IsZero(m.ProxyStatistics[i]) { // not required
continue
}
if m.ProxyStatistics[i] != nil {
if err := m.ProxyStatistics[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("proxy-statistics" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("proxy-statistics" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *EndpointPolicyStatus) validateRealized(formats strfmt.Registry) error {
if swag.IsZero(m.Realized) { // not required
return nil
}
if m.Realized != nil {
if err := m.Realized.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
func (m *EndpointPolicyStatus) validateSpec(formats strfmt.Registry) error {
if swag.IsZero(m.Spec) { // not required
return nil
}
if m.Spec != nil {
if err := m.Spec.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
// ContextValidate validate this endpoint policy status based on the context it is used
func (m *EndpointPolicyStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateProxyStatistics(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateRealized(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSpec(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointPolicyStatus) contextValidateProxyStatistics(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.ProxyStatistics); i++ {
if m.ProxyStatistics[i] != nil {
if swag.IsZero(m.ProxyStatistics[i]) { // not required
return nil
}
if err := m.ProxyStatistics[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("proxy-statistics" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("proxy-statistics" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *EndpointPolicyStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
func (m *EndpointPolicyStatus) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error {
if m.Spec != nil {
if swag.IsZero(m.Spec) { // not required
return nil
}
if err := m.Spec.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *EndpointPolicyStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointPolicyStatus) UnmarshalBinary(b []byte) error {
var res EndpointPolicyStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/validate"
)
// EndpointState State of endpoint
//
// swagger:model EndpointState
type EndpointState string
func NewEndpointState(value EndpointState) *EndpointState {
return &value
}
// Pointer returns a pointer to a freshly-allocated EndpointState.
func (m EndpointState) Pointer() *EndpointState {
return &m
}
const (
// EndpointStateWaitingDashForDashIdentity captures enum value "waiting-for-identity"
EndpointStateWaitingDashForDashIdentity EndpointState = "waiting-for-identity"
// EndpointStateNotDashReady captures enum value "not-ready"
EndpointStateNotDashReady EndpointState = "not-ready"
// EndpointStateWaitingDashToDashRegenerate captures enum value "waiting-to-regenerate"
EndpointStateWaitingDashToDashRegenerate EndpointState = "waiting-to-regenerate"
// EndpointStateRegenerating captures enum value "regenerating"
EndpointStateRegenerating EndpointState = "regenerating"
// EndpointStateRestoring captures enum value "restoring"
EndpointStateRestoring EndpointState = "restoring"
// EndpointStateReady captures enum value "ready"
EndpointStateReady EndpointState = "ready"
// EndpointStateDisconnecting captures enum value "disconnecting"
EndpointStateDisconnecting EndpointState = "disconnecting"
// EndpointStateDisconnected captures enum value "disconnected"
EndpointStateDisconnected EndpointState = "disconnected"
// EndpointStateInvalid captures enum value "invalid"
EndpointStateInvalid EndpointState = "invalid"
)
// for schema
var endpointStateEnum []interface{}
func init() {
var res []EndpointState
if err := json.Unmarshal([]byte(`["waiting-for-identity","not-ready","waiting-to-regenerate","regenerating","restoring","ready","disconnecting","disconnected","invalid"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
endpointStateEnum = append(endpointStateEnum, v)
}
}
func (m EndpointState) validateEndpointStateEnum(path, location string, value EndpointState) error {
if err := validate.EnumCase(path, location, value, endpointStateEnum, true); err != nil {
return err
}
return nil
}
// Validate validates this endpoint state
func (m EndpointState) Validate(formats strfmt.Registry) error {
var res []error
// value enum
if err := m.validateEndpointStateEnum("", "body", m); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validates this endpoint state based on context it is used
func (m EndpointState) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
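// Hand-written usage sketch (not generated by go-swagger): the
// NewEndpointState/Pointer helpers exist mainly to obtain a *EndpointState for
// required fields such as EndpointStatus.State defined later in this package.
package models
import (
	"fmt"
	"github.com/go-openapi/strfmt"
)
func exampleEndpointState() {
	state := NewEndpointState(EndpointStateWaitingDashForDashIdentity)
	fmt.Println(state.Validate(strfmt.Default)) // <nil>
	bad := EndpointState("sleeping") // not part of endpointStateEnum
	fmt.Println(bad.Validate(strfmt.Default)) // enum validation error
}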
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// EndpointStatus The current state and configuration of the endpoint, its policy & datapath, and subcomponents
//
// swagger:model EndpointStatus
type EndpointStatus struct {
// Status of internal controllers attached to this endpoint
Controllers ControllerStatuses `json:"controllers,omitempty"`
// Unique identifiers for this endpoint from outside cilium
ExternalIdentifiers *EndpointIdentifiers `json:"external-identifiers,omitempty"`
// Summary of overall endpoint & subcomponent health
Health *EndpointHealth `json:"health,omitempty"`
// The security identity for this endpoint
Identity *Identity `json:"identity,omitempty"`
// Labels applied to this endpoint
Labels *LabelConfigurationStatus `json:"labels,omitempty"`
// Most recent status log. See endpoint/{id}/log for the complete log.
Log EndpointStatusLog `json:"log,omitempty"`
// List of named ports that can be used in Network Policy
NamedPorts NamedPorts `json:"namedPorts,omitempty"`
// Networking properties of the endpoint
Networking *EndpointNetworking `json:"networking,omitempty"`
// The policy applied to this endpoint from the policy repository
Policy *EndpointPolicyStatus `json:"policy,omitempty"`
// The configuration in effect on this endpoint
Realized *EndpointConfigurationSpec `json:"realized,omitempty"`
// Current state of endpoint
// Required: true
State *EndpointState `json:"state"`
}
// Validate validates this endpoint status
func (m *EndpointStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateControllers(formats); err != nil {
res = append(res, err)
}
if err := m.validateExternalIdentifiers(formats); err != nil {
res = append(res, err)
}
if err := m.validateHealth(formats); err != nil {
res = append(res, err)
}
if err := m.validateIdentity(formats); err != nil {
res = append(res, err)
}
if err := m.validateLabels(formats); err != nil {
res = append(res, err)
}
if err := m.validateLog(formats); err != nil {
res = append(res, err)
}
if err := m.validateNamedPorts(formats); err != nil {
res = append(res, err)
}
if err := m.validateNetworking(formats); err != nil {
res = append(res, err)
}
if err := m.validatePolicy(formats); err != nil {
res = append(res, err)
}
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointStatus) validateControllers(formats strfmt.Registry) error {
if swag.IsZero(m.Controllers) { // not required
return nil
}
if err := m.Controllers.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("controllers")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("controllers")
}
return err
}
return nil
}
func (m *EndpointStatus) validateExternalIdentifiers(formats strfmt.Registry) error {
if swag.IsZero(m.ExternalIdentifiers) { // not required
return nil
}
if m.ExternalIdentifiers != nil {
if err := m.ExternalIdentifiers.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("external-identifiers")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("external-identifiers")
}
return err
}
}
return nil
}
func (m *EndpointStatus) validateHealth(formats strfmt.Registry) error {
if swag.IsZero(m.Health) { // not required
return nil
}
if m.Health != nil {
if err := m.Health.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("health")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("health")
}
return err
}
}
return nil
}
func (m *EndpointStatus) validateIdentity(formats strfmt.Registry) error {
if swag.IsZero(m.Identity) { // not required
return nil
}
if m.Identity != nil {
if err := m.Identity.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("identity")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("identity")
}
return err
}
}
return nil
}
func (m *EndpointStatus) validateLabels(formats strfmt.Registry) error {
if swag.IsZero(m.Labels) { // not required
return nil
}
if m.Labels != nil {
if err := m.Labels.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
}
return nil
}
func (m *EndpointStatus) validateLog(formats strfmt.Registry) error {
if swag.IsZero(m.Log) { // not required
return nil
}
if err := m.Log.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("log")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("log")
}
return err
}
return nil
}
func (m *EndpointStatus) validateNamedPorts(formats strfmt.Registry) error {
if swag.IsZero(m.NamedPorts) { // not required
return nil
}
if err := m.NamedPorts.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("namedPorts")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("namedPorts")
}
return err
}
return nil
}
func (m *EndpointStatus) validateNetworking(formats strfmt.Registry) error {
if swag.IsZero(m.Networking) { // not required
return nil
}
if m.Networking != nil {
if err := m.Networking.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("networking")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("networking")
}
return err
}
}
return nil
}
func (m *EndpointStatus) validatePolicy(formats strfmt.Registry) error {
if swag.IsZero(m.Policy) { // not required
return nil
}
if m.Policy != nil {
if err := m.Policy.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("policy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("policy")
}
return err
}
}
return nil
}
func (m *EndpointStatus) validateRealized(formats strfmt.Registry) error {
if swag.IsZero(m.Realized) { // not required
return nil
}
if m.Realized != nil {
if err := m.Realized.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
func (m *EndpointStatus) validateState(formats strfmt.Registry) error {
if err := validate.Required("state", "body", m.State); err != nil {
return err
}
if m.State != nil {
if err := m.State.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("state")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("state")
}
return err
}
}
return nil
}
// ContextValidate validate this endpoint status based on the context it is used
func (m *EndpointStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateControllers(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateExternalIdentifiers(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateHealth(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIdentity(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateLabels(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateLog(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNamedPorts(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNetworking(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidatePolicy(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateRealized(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateState(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointStatus) contextValidateControllers(ctx context.Context, formats strfmt.Registry) error {
if err := m.Controllers.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("controllers")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("controllers")
}
return err
}
return nil
}
func (m *EndpointStatus) contextValidateExternalIdentifiers(ctx context.Context, formats strfmt.Registry) error {
if m.ExternalIdentifiers != nil {
if swag.IsZero(m.ExternalIdentifiers) { // not required
return nil
}
if err := m.ExternalIdentifiers.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("external-identifiers")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("external-identifiers")
}
return err
}
}
return nil
}
func (m *EndpointStatus) contextValidateHealth(ctx context.Context, formats strfmt.Registry) error {
if m.Health != nil {
if swag.IsZero(m.Health) { // not required
return nil
}
if err := m.Health.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("health")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("health")
}
return err
}
}
return nil
}
func (m *EndpointStatus) contextValidateIdentity(ctx context.Context, formats strfmt.Registry) error {
if m.Identity != nil {
if swag.IsZero(m.Identity) { // not required
return nil
}
if err := m.Identity.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("identity")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("identity")
}
return err
}
}
return nil
}
func (m *EndpointStatus) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error {
if m.Labels != nil {
if swag.IsZero(m.Labels) { // not required
return nil
}
if err := m.Labels.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
}
return nil
}
func (m *EndpointStatus) contextValidateLog(ctx context.Context, formats strfmt.Registry) error {
if err := m.Log.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("log")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("log")
}
return err
}
return nil
}
func (m *EndpointStatus) contextValidateNamedPorts(ctx context.Context, formats strfmt.Registry) error {
if err := m.NamedPorts.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("namedPorts")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("namedPorts")
}
return err
}
return nil
}
func (m *EndpointStatus) contextValidateNetworking(ctx context.Context, formats strfmt.Registry) error {
if m.Networking != nil {
if swag.IsZero(m.Networking) { // not required
return nil
}
if err := m.Networking.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("networking")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("networking")
}
return err
}
}
return nil
}
func (m *EndpointStatus) contextValidatePolicy(ctx context.Context, formats strfmt.Registry) error {
if m.Policy != nil {
if swag.IsZero(m.Policy) { // not required
return nil
}
if err := m.Policy.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("policy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("policy")
}
return err
}
}
return nil
}
func (m *EndpointStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
func (m *EndpointStatus) contextValidateState(ctx context.Context, formats strfmt.Registry) error {
if m.State != nil {
if err := m.State.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("state")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("state")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *EndpointStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointStatus) UnmarshalBinary(b []byte) error {
var res EndpointStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
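// Hand-written usage sketch (not generated by go-swagger): State is the only
// required field of EndpointStatus, so Validate reports a missing "state" until
// a pointer to a valid EndpointState is set.
package models
import (
	"fmt"
	"github.com/go-openapi/strfmt"
)
func exampleEndpointStatusRequiredState() {
	s := &EndpointStatus{}
	fmt.Println(s.Validate(strfmt.Default)) // required-field error for "state"
	s.State = NewEndpointState(EndpointStateReady)
	fmt.Println(s.Validate(strfmt.Default)) // <nil>
}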
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// EndpointStatusChange Indication of a change of status
//
// +deepequal-gen=true
//
// swagger:model EndpointStatusChange
type EndpointStatusChange struct {
// Code indicating the type of status change
// Enum: ["ok","failed"]
Code string `json:"code,omitempty"`
// Status message
Message string `json:"message,omitempty"`
// state
State EndpointState `json:"state,omitempty"`
// Timestamp when status change occurred
Timestamp string `json:"timestamp,omitempty"`
}
// Validate validates this endpoint status change
func (m *EndpointStatusChange) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateCode(formats); err != nil {
res = append(res, err)
}
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var endpointStatusChangeTypeCodePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["ok","failed"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
endpointStatusChangeTypeCodePropEnum = append(endpointStatusChangeTypeCodePropEnum, v)
}
}
const (
// EndpointStatusChangeCodeOk captures enum value "ok"
EndpointStatusChangeCodeOk string = "ok"
// EndpointStatusChangeCodeFailed captures enum value "failed"
EndpointStatusChangeCodeFailed string = "failed"
)
// prop value enum
func (m *EndpointStatusChange) validateCodeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, endpointStatusChangeTypeCodePropEnum, true); err != nil {
return err
}
return nil
}
func (m *EndpointStatusChange) validateCode(formats strfmt.Registry) error {
if swag.IsZero(m.Code) { // not required
return nil
}
// value enum
if err := m.validateCodeEnum("code", "body", m.Code); err != nil {
return err
}
return nil
}
func (m *EndpointStatusChange) validateState(formats strfmt.Registry) error {
if swag.IsZero(m.State) { // not required
return nil
}
if err := m.State.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("state")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("state")
}
return err
}
return nil
}
// ContextValidate validate this endpoint status change based on the context it is used
func (m *EndpointStatusChange) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateState(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *EndpointStatusChange) contextValidateState(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.State) { // not required
return nil
}
if err := m.State.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("state")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("state")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *EndpointStatusChange) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *EndpointStatusChange) UnmarshalBinary(b []byte) error {
var res EndpointStatusChange
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// EndpointStatusLog Status log of endpoint
//
// swagger:model EndpointStatusLog
type EndpointStatusLog []*EndpointStatusChange
// Validate validates this endpoint status log
func (m EndpointStatusLog) Validate(formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if swag.IsZero(m[i]) { // not required
continue
}
if m[i] != nil {
if err := m[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validate this endpoint status log based on the context it is used
func (m EndpointStatusLog) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if m[i] != nil {
if swag.IsZero(m[i]) { // not required
return nil
}
if err := m[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
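// Hand-written usage sketch (not generated by go-swagger): builds a small
// EndpointStatusLog and validates it; the invalid code in the second entry is
// reported with its slice index in the error path.
package models
import (
	"fmt"
	"github.com/go-openapi/strfmt"
)
func exampleEndpointStatusLog() {
	log := EndpointStatusLog{
		&EndpointStatusChange{Code: EndpointStatusChangeCodeOk, Message: "policy applied", State: EndpointStateReady},
		&EndpointStatusChange{Code: "unknown"}, // not "ok" or "failed"
	}
	fmt.Println(log.Validate(strfmt.Default)) // error path is prefixed with index 1
}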
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
)
// Error error
//
// swagger:model Error
type Error string
// Validate validates this error
func (m Error) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this error based on context it is used
func (m Error) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// FrontendAddress Layer 4 address. The protocol is currently ignored, all services will
// behave as if protocol any is specified. To restrict to a particular
// protocol, use policy.
//
// swagger:model FrontendAddress
type FrontendAddress struct {
// Layer 3 address
IP string `json:"ip,omitempty"`
// Layer 4 port number
Port uint16 `json:"port,omitempty"`
// Layer 4 protocol
// Enum: ["tcp","udp","any"]
Protocol string `json:"protocol,omitempty"`
// Load balancing scope for frontend address
// Enum: ["external","internal"]
Scope string `json:"scope,omitempty"`
}
// Validate validates this frontend address
func (m *FrontendAddress) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateProtocol(formats); err != nil {
res = append(res, err)
}
if err := m.validateScope(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var frontendAddressTypeProtocolPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["tcp","udp","any"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
frontendAddressTypeProtocolPropEnum = append(frontendAddressTypeProtocolPropEnum, v)
}
}
const (
// FrontendAddressProtocolTCP captures enum value "tcp"
FrontendAddressProtocolTCP string = "tcp"
// FrontendAddressProtocolUDP captures enum value "udp"
FrontendAddressProtocolUDP string = "udp"
// FrontendAddressProtocolAny captures enum value "any"
FrontendAddressProtocolAny string = "any"
)
// prop value enum
func (m *FrontendAddress) validateProtocolEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, frontendAddressTypeProtocolPropEnum, true); err != nil {
return err
}
return nil
}
func (m *FrontendAddress) validateProtocol(formats strfmt.Registry) error {
if swag.IsZero(m.Protocol) { // not required
return nil
}
// value enum
if err := m.validateProtocolEnum("protocol", "body", m.Protocol); err != nil {
return err
}
return nil
}
var frontendAddressTypeScopePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["external","internal"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
frontendAddressTypeScopePropEnum = append(frontendAddressTypeScopePropEnum, v)
}
}
const (
// FrontendAddressScopeExternal captures enum value "external"
FrontendAddressScopeExternal string = "external"
// FrontendAddressScopeInternal captures enum value "internal"
FrontendAddressScopeInternal string = "internal"
)
// prop value enum
func (m *FrontendAddress) validateScopeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, frontendAddressTypeScopePropEnum, true); err != nil {
return err
}
return nil
}
func (m *FrontendAddress) validateScope(formats strfmt.Registry) error {
if swag.IsZero(m.Scope) { // not required
return nil
}
// value enum
if err := m.validateScopeEnum("scope", "body", m.Scope); err != nil {
return err
}
return nil
}
// ContextValidate validates this frontend address based on context it is used
func (m *FrontendAddress) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *FrontendAddress) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *FrontendAddress) UnmarshalBinary(b []byte) error {
var res FrontendAddress
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
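// Hand-written usage sketch (not generated by go-swagger): the protocol and
// scope enums on FrontendAddress. As the model comment notes, the protocol is
// currently ignored by services, but it must still be one of the allowed values.
package models
import (
	"fmt"
	"github.com/go-openapi/strfmt"
)
func exampleFrontendAddress() {
	fa := &FrontendAddress{
		IP:       "192.0.2.10",
		Port:     443,
		Protocol: FrontendAddressProtocolTCP,
		Scope:    FrontendAddressScopeExternal,
	}
	fmt.Println(fa.Validate(strfmt.Default)) // <nil>
	fa.Protocol = "sctp" // not in ["tcp","udp","any"]
	fmt.Println(fa.Validate(strfmt.Default)) // enum error for "protocol"
}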
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// FrontendMapping Mapping of frontend to backend pods of an LRP
//
// swagger:model FrontendMapping
type FrontendMapping struct {
// Pod backends of an LRP
Backends []*LRPBackend `json:"backends"`
// frontend address
FrontendAddress *FrontendAddress `json:"frontend-address,omitempty"`
}
// Validate validates this frontend mapping
func (m *FrontendMapping) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateBackends(formats); err != nil {
res = append(res, err)
}
if err := m.validateFrontendAddress(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *FrontendMapping) validateBackends(formats strfmt.Registry) error {
if swag.IsZero(m.Backends) { // not required
return nil
}
for i := 0; i < len(m.Backends); i++ {
if swag.IsZero(m.Backends[i]) { // not required
continue
}
if m.Backends[i] != nil {
if err := m.Backends[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("backends" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("backends" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *FrontendMapping) validateFrontendAddress(formats strfmt.Registry) error {
if swag.IsZero(m.FrontendAddress) { // not required
return nil
}
if m.FrontendAddress != nil {
if err := m.FrontendAddress.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("frontend-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("frontend-address")
}
return err
}
}
return nil
}
// ContextValidate validate this frontend mapping based on the context it is used
func (m *FrontendMapping) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateBackends(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateFrontendAddress(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *FrontendMapping) contextValidateBackends(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Backends); i++ {
if m.Backends[i] != nil {
if swag.IsZero(m.Backends[i]) { // not required
return nil
}
if err := m.Backends[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("backends" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("backends" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *FrontendMapping) contextValidateFrontendAddress(ctx context.Context, formats strfmt.Registry) error {
if m.FrontendAddress != nil {
if swag.IsZero(m.FrontendAddress) { // not required
return nil
}
if err := m.FrontendAddress.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("frontend-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("frontend-address")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *FrontendMapping) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *FrontendMapping) UnmarshalBinary(b []byte) error {
var res FrontendMapping
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
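// Hand-written usage sketch (not generated by go-swagger): nested validation
// errors from a FrontendMapping's frontend-address are reported under the
// "frontend-address" path; a nil Backends slice is treated as "not required".
package models
import (
	"fmt"
	"github.com/go-openapi/strfmt"
)
func exampleFrontendMapping() {
	fm := &FrontendMapping{
		FrontendAddress: &FrontendAddress{IP: "10.96.0.10", Port: 53, Protocol: "icmp"}, // invalid protocol
	}
	fmt.Println(fm.Validate(strfmt.Default)) // error mentioning "frontend-address" and "protocol"
}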
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// HostFirewall Status of the host firewall
//
// +k8s:deepcopy-gen=true
//
// swagger:model HostFirewall
type HostFirewall struct {
// devices
Devices []string `json:"devices"`
// mode
// Enum: ["Disabled","Enabled"]
Mode string `json:"mode,omitempty"`
}
// Validate validates this host firewall
func (m *HostFirewall) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateMode(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var hostFirewallTypeModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Disabled","Enabled"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
hostFirewallTypeModePropEnum = append(hostFirewallTypeModePropEnum, v)
}
}
const (
// HostFirewallModeDisabled captures enum value "Disabled"
HostFirewallModeDisabled string = "Disabled"
// HostFirewallModeEnabled captures enum value "Enabled"
HostFirewallModeEnabled string = "Enabled"
)
// prop value enum
func (m *HostFirewall) validateModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, hostFirewallTypeModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *HostFirewall) validateMode(formats strfmt.Registry) error {
if swag.IsZero(m.Mode) { // not required
return nil
}
// value enum
if err := m.validateModeEnum("mode", "body", m.Mode); err != nil {
return err
}
return nil
}
// ContextValidate validates this host firewall based on context it is used
func (m *HostFirewall) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *HostFirewall) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *HostFirewall) UnmarshalBinary(b []byte) error {
var res HostFirewall
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
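// Hand-written usage sketch (not generated by go-swagger): constructs a
// HostFirewall status value; the device names are made up for illustration.
package models
import (
	"fmt"
	"github.com/go-openapi/strfmt"
)
func exampleHostFirewall() {
	hf := &HostFirewall{Mode: HostFirewallModeEnabled, Devices: []string{"eth0", "eth1"}}
	fmt.Println(hf.Validate(strfmt.Default)) // <nil>
	hf.Mode = "Monitoring" // not "Disabled" or "Enabled"
	fmt.Println(hf.Validate(strfmt.Default)) // enum error for "mode"
}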
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// HubbleStatus Status of the Hubble server
//
// +k8s:deepcopy-gen=true
//
// swagger:model HubbleStatus
type HubbleStatus struct {
// metrics
Metrics *HubbleStatusMetrics `json:"metrics,omitempty"`
// Human readable status/error/warning message
Msg string `json:"msg,omitempty"`
// observer
Observer *HubbleStatusObserver `json:"observer,omitempty"`
// State the component is in
// Enum: ["Ok","Warning","Failure","Disabled"]
State string `json:"state,omitempty"`
}
// Validate validates this hubble status
func (m *HubbleStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateMetrics(formats); err != nil {
res = append(res, err)
}
if err := m.validateObserver(formats); err != nil {
res = append(res, err)
}
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *HubbleStatus) validateMetrics(formats strfmt.Registry) error {
if swag.IsZero(m.Metrics) { // not required
return nil
}
if m.Metrics != nil {
if err := m.Metrics.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("metrics")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("metrics")
}
return err
}
}
return nil
}
func (m *HubbleStatus) validateObserver(formats strfmt.Registry) error {
if swag.IsZero(m.Observer) { // not required
return nil
}
if m.Observer != nil {
if err := m.Observer.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("observer")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("observer")
}
return err
}
}
return nil
}
var hubbleStatusTypeStatePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Ok","Warning","Failure","Disabled"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
hubbleStatusTypeStatePropEnum = append(hubbleStatusTypeStatePropEnum, v)
}
}
const (
// HubbleStatusStateOk captures enum value "Ok"
HubbleStatusStateOk string = "Ok"
// HubbleStatusStateWarning captures enum value "Warning"
HubbleStatusStateWarning string = "Warning"
// HubbleStatusStateFailure captures enum value "Failure"
HubbleStatusStateFailure string = "Failure"
// HubbleStatusStateDisabled captures enum value "Disabled"
HubbleStatusStateDisabled string = "Disabled"
)
// prop value enum
func (m *HubbleStatus) validateStateEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, hubbleStatusTypeStatePropEnum, true); err != nil {
return err
}
return nil
}
func (m *HubbleStatus) validateState(formats strfmt.Registry) error {
if swag.IsZero(m.State) { // not required
return nil
}
// value enum
if err := m.validateStateEnum("state", "body", m.State); err != nil {
return err
}
return nil
}
// ContextValidate validates this hubble status based on the context it is used
func (m *HubbleStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateMetrics(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateObserver(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *HubbleStatus) contextValidateMetrics(ctx context.Context, formats strfmt.Registry) error {
if m.Metrics != nil {
if swag.IsZero(m.Metrics) { // not required
return nil
}
if err := m.Metrics.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("metrics")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("metrics")
}
return err
}
}
return nil
}
func (m *HubbleStatus) contextValidateObserver(ctx context.Context, formats strfmt.Registry) error {
if m.Observer != nil {
if swag.IsZero(m.Observer) { // not required
return nil
}
if err := m.Observer.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("observer")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("observer")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *HubbleStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *HubbleStatus) UnmarshalBinary(b []byte) error {
var res HubbleStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
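// The example below is an editorial illustration, not generated code: it is a
// minimal sketch of how a caller might construct a HubbleStatus using the
// generated enum constants and run both validation passes. The function name
// exampleHubbleStatusValidation is hypothetical and not part of the Cilium API;
// strfmt.Default is the default format registry from go-openapi/strfmt.
func exampleHubbleStatusValidation() error {
	status := &HubbleStatus{
		Msg:   "hubble is running",
		State: HubbleStatusStateOk, // must be one of Ok, Warning, Failure, Disabled
		Observer: &HubbleStatusObserver{
			CurrentFlows: 1024,
			MaxFlows:     4095,
			SeenFlows:    100000,
		},
	}
	// Validate checks the state enum and recurses into the metrics/observer models.
	if err := status.Validate(strfmt.Default); err != nil {
		return err
	}
	// ContextValidate re-runs the nested, context-aware validations.
	return status.ContextValidate(context.Background(), strfmt.Default)
}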
// HubbleStatusMetrics Status of the Hubble metrics server
//
// swagger:model HubbleStatusMetrics
type HubbleStatusMetrics struct {
// State of the Hubble metrics
// Enum: ["Ok","Warning","Failure","Disabled"]
State string `json:"state,omitempty"`
}
// Validate validates this hubble status metrics
func (m *HubbleStatusMetrics) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var hubbleStatusMetricsTypeStatePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Ok","Warning","Failure","Disabled"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
hubbleStatusMetricsTypeStatePropEnum = append(hubbleStatusMetricsTypeStatePropEnum, v)
}
}
const (
// HubbleStatusMetricsStateOk captures enum value "Ok"
HubbleStatusMetricsStateOk string = "Ok"
// HubbleStatusMetricsStateWarning captures enum value "Warning"
HubbleStatusMetricsStateWarning string = "Warning"
// HubbleStatusMetricsStateFailure captures enum value "Failure"
HubbleStatusMetricsStateFailure string = "Failure"
// HubbleStatusMetricsStateDisabled captures enum value "Disabled"
HubbleStatusMetricsStateDisabled string = "Disabled"
)
// prop value enum
func (m *HubbleStatusMetrics) validateStateEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, hubbleStatusMetricsTypeStatePropEnum, true); err != nil {
return err
}
return nil
}
func (m *HubbleStatusMetrics) validateState(formats strfmt.Registry) error {
if swag.IsZero(m.State) { // not required
return nil
}
// value enum
if err := m.validateStateEnum("metrics"+"."+"state", "body", m.State); err != nil {
return err
}
return nil
}
// ContextValidate validates this hubble status metrics based on the context it is used
func (m *HubbleStatusMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *HubbleStatusMetrics) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *HubbleStatusMetrics) UnmarshalBinary(b []byte) error {
var res HubbleStatusMetrics
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// HubbleStatusObserver Status of the Hubble observer
//
// +k8s:deepcopy-gen=true
//
// swagger:model HubbleStatusObserver
type HubbleStatusObserver struct {
// Current number of flows this Hubble observer stores
CurrentFlows int64 `json:"current-flows,omitempty"`
// Maximum number of flows this Hubble observer is able to store
MaxFlows int64 `json:"max-flows,omitempty"`
// Total number of flows this Hubble observer has seen
SeenFlows int64 `json:"seen-flows,omitempty"`
// Uptime of this Hubble observer instance
// Format: duration
Uptime strfmt.Duration `json:"uptime,omitempty"`
}
// Validate validates this hubble status observer
func (m *HubbleStatusObserver) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateUptime(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *HubbleStatusObserver) validateUptime(formats strfmt.Registry) error {
if swag.IsZero(m.Uptime) { // not required
return nil
}
if err := validate.FormatOf("observer"+"."+"uptime", "body", "duration", m.Uptime.String(), formats); err != nil {
return err
}
return nil
}
// ContextValidate validates this hubble status observer based on the context it is used
func (m *HubbleStatusObserver) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *HubbleStatusObserver) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *HubbleStatusObserver) UnmarshalBinary(b []byte) error {
var res HubbleStatusObserver
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
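// Editorial illustration (not generated): a sketch of decoding a
// HubbleStatusObserver from its JSON wire form and validating the
// "duration"-formatted uptime field. The function name
// exampleHubbleStatusObserverDecode and the payload values are hypothetical.
func exampleHubbleStatusObserverDecode() (*HubbleStatusObserver, error) {
	raw := []byte(`{"current-flows":512,"max-flows":4095,"seen-flows":250000,"uptime":"26h15m30s"}`)
	var obs HubbleStatusObserver
	// UnmarshalBinary wraps swag.ReadJSON, so it accepts plain JSON bytes.
	if err := obs.UnmarshalBinary(raw); err != nil {
		return nil, err
	}
	// Validate checks that uptime conforms to the registered duration format.
	if err := obs.Validate(strfmt.Default); err != nil {
		return nil, err
	}
	return &obs, nil
}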
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IPsecStatus Status of the IPsec agent
//
// +k8s:deepcopy-gen=true
//
// swagger:model IPsecStatus
type IPsecStatus struct {
// IPsec decryption interfaces
DecryptInterfaces []string `json:"decrypt-interfaces"`
// IPsec error count
ErrorCount int64 `json:"error-count,omitempty"`
// IPsec keys in use
KeysInUse int64 `json:"keys-in-use,omitempty"`
// IPsec max sequence number
MaxSeqNumber string `json:"max-seq-number,omitempty"`
// IPsec XFRM errors
XfrmErrors map[string]int64 `json:"xfrm-errors,omitempty"`
}
// Validate validates this IPsec status
func (m *IPsecStatus) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this IPsec status based on the context it is used
func (m *IPsecStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *IPsecStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPsecStatus) UnmarshalBinary(b []byte) error {
var res IPsecStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
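// Editorial illustration (not generated): a sketch of the binary (JSON) round
// trip that the MarshalBinary/UnmarshalBinary pair provides for IPsecStatus.
// The function name exampleIPsecStatusRoundTrip and all field values are
// illustrative only.
func exampleIPsecStatusRoundTrip() (*IPsecStatus, error) {
	in := &IPsecStatus{
		DecryptInterfaces: []string{"eth0"},
		KeysInUse:         2,
		MaxSeqNumber:      "0x44d/0xffffffff",
		XfrmErrors:        map[string]int64{"XfrmInNoStates": 0},
	}
	b, err := in.MarshalBinary() // swag.WriteJSON under the hood
	if err != nil {
		return nil, err
	}
	out := &IPsecStatus{}
	if err := out.UnmarshalBinary(b); err != nil {
		return nil, err
	}
	return out, nil
}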
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Identity Security identity
//
// swagger:model Identity
type Identity struct {
// Unique identifier
ID int64 `json:"id,omitempty"`
// Labels describing the identity
Labels Labels `json:"labels,omitempty"`
// SHA256 of labels
LabelsSHA256 string `json:"labelsSHA256,omitempty"`
}
// Validate validates this identity
func (m *Identity) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateLabels(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Identity) validateLabels(formats strfmt.Registry) error {
if swag.IsZero(m.Labels) { // not required
return nil
}
if err := m.Labels.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
return nil
}
// ContextValidate validates this identity based on the context it is used
func (m *Identity) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateLabels(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Identity) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error {
if err := m.Labels.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *Identity) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Identity) UnmarshalBinary(b []byte) error {
var res Identity
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
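// Editorial illustration (not generated): a sketch of constructing and
// validating an Identity. Labels is the generated label list type from this
// package; the label strings and the helper name exampleIdentityValidation
// are illustrative only.
func exampleIdentityValidation() error {
	id := &Identity{
		ID:     12345,
		Labels: Labels{"k8s:io.kubernetes.pod.namespace=default", "k8s:app=frontend"},
	}
	if err := id.Validate(strfmt.Default); err != nil {
		return err
	}
	return id.ContextValidate(context.Background(), strfmt.Default)
}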
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IdentityEndpoints Security identities owned by endpoints on the local node
//
// swagger:model IdentityEndpoints
type IdentityEndpoints struct {
// Security identity
Identity *Identity `json:"identity,omitempty"`
// number of endpoints consuming this identity locally (should always be > 0)
RefCount int64 `json:"refCount,omitempty"`
}
// Validate validates this identity endpoints
func (m *IdentityEndpoints) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateIdentity(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IdentityEndpoints) validateIdentity(formats strfmt.Registry) error {
if swag.IsZero(m.Identity) { // not required
return nil
}
if m.Identity != nil {
if err := m.Identity.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("identity")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("identity")
}
return err
}
}
return nil
}
// ContextValidate validates this identity endpoints based on the context it is used
func (m *IdentityEndpoints) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateIdentity(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IdentityEndpoints) contextValidateIdentity(ctx context.Context, formats strfmt.Registry) error {
if m.Identity != nil {
if swag.IsZero(m.Identity) { // not required
return nil
}
if err := m.Identity.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("identity")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("identity")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *IdentityEndpoints) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IdentityEndpoints) UnmarshalBinary(b []byte) error {
var res IdentityEndpoints
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
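// Editorial illustration (not generated): a sketch of wrapping an Identity in
// IdentityEndpoints together with its local reference count, then validating
// it. The helper name exampleIdentityEndpointsValidation is hypothetical.
func exampleIdentityEndpointsValidation() error {
	ie := &IdentityEndpoints{
		Identity: &Identity{ID: 4242, Labels: Labels{"k8s:app=backend"}},
		RefCount: 3, // should always be > 0 for identities reported here
	}
	return ie.Validate(strfmt.Default)
}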
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IdentityRange Status of identity range of the cluster
//
// swagger:model IdentityRange
type IdentityRange struct {
// Maximum identity of the cluster
MaxIdentity int64 `json:"max-identity,omitempty"`
// Minimum identity of the cluster
MinIdentity int64 `json:"min-identity,omitempty"`
}
// Validate validates this identity range
func (m *IdentityRange) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this identity range based on the context it is used
func (m *IdentityRange) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *IdentityRange) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IdentityRange) UnmarshalBinary(b []byte) error {
var res IdentityRange
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IPAMAddressResponse IPAM configuration of an individual address family
//
// swagger:model IPAMAddressResponse
type IPAMAddressResponse struct {
// List of CIDRs out of which IPs are allocated
Cidrs []string `json:"cidrs"`
// The UUID for the expiration timer. Set when expiration has been
// enabled while allocating.
//
ExpirationUUID string `json:"expiration-uuid,omitempty"`
// IP of gateway
Gateway string `json:"gateway,omitempty"`
// InterfaceNumber is a field for generically identifying an interface. This is only useful in ENI mode.
//
InterfaceNumber string `json:"interface-number,omitempty"`
// Allocated IP for endpoint
IP string `json:"ip,omitempty"`
// MAC of master interface if address is a slave/secondary of a master interface
MasterMac string `json:"master-mac,omitempty"`
}
// Validate validates this IPAM address response
func (m *IPAMAddressResponse) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this IPAM address response based on the context it is used
func (m *IPAMAddressResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *IPAMAddressResponse) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPAMAddressResponse) UnmarshalBinary(b []byte) error {
var res IPAMAddressResponse
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// IPAMResponse IPAM configuration of an endpoint
//
// swagger:model IPAMResponse
type IPAMResponse struct {
// address
// Required: true
Address *AddressPair `json:"address"`
// host addressing
// Required: true
HostAddressing *NodeAddressing `json:"host-addressing"`
// ipv4
IPV4 *IPAMAddressResponse `json:"ipv4,omitempty"`
// ipv6
IPV6 *IPAMAddressResponse `json:"ipv6,omitempty"`
}
// Validate validates this IPAM response
func (m *IPAMResponse) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateAddress(formats); err != nil {
res = append(res, err)
}
if err := m.validateHostAddressing(formats); err != nil {
res = append(res, err)
}
if err := m.validateIPV4(formats); err != nil {
res = append(res, err)
}
if err := m.validateIPV6(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IPAMResponse) validateAddress(formats strfmt.Registry) error {
if err := validate.Required("address", "body", m.Address); err != nil {
return err
}
if m.Address != nil {
if err := m.Address.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("address")
}
return err
}
}
return nil
}
func (m *IPAMResponse) validateHostAddressing(formats strfmt.Registry) error {
if err := validate.Required("host-addressing", "body", m.HostAddressing); err != nil {
return err
}
if m.HostAddressing != nil {
if err := m.HostAddressing.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("host-addressing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("host-addressing")
}
return err
}
}
return nil
}
func (m *IPAMResponse) validateIPV4(formats strfmt.Registry) error {
if swag.IsZero(m.IPV4) { // not required
return nil
}
if m.IPV4 != nil {
if err := m.IPV4.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv4")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv4")
}
return err
}
}
return nil
}
func (m *IPAMResponse) validateIPV6(formats strfmt.Registry) error {
if swag.IsZero(m.IPV6) { // not required
return nil
}
if m.IPV6 != nil {
if err := m.IPV6.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv6")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv6")
}
return err
}
}
return nil
}
// ContextValidate validates this IPAM response based on the context it is used
func (m *IPAMResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateAddress(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateHostAddressing(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIPV4(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIPV6(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IPAMResponse) contextValidateAddress(ctx context.Context, formats strfmt.Registry) error {
if m.Address != nil {
if err := m.Address.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("address")
}
return err
}
}
return nil
}
func (m *IPAMResponse) contextValidateHostAddressing(ctx context.Context, formats strfmt.Registry) error {
if m.HostAddressing != nil {
if err := m.HostAddressing.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("host-addressing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("host-addressing")
}
return err
}
}
return nil
}
func (m *IPAMResponse) contextValidateIPV4(ctx context.Context, formats strfmt.Registry) error {
if m.IPV4 != nil {
if swag.IsZero(m.IPV4) { // not required
return nil
}
if err := m.IPV4.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv4")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv4")
}
return err
}
}
return nil
}
func (m *IPAMResponse) contextValidateIPV6(ctx context.Context, formats strfmt.Registry) error {
if m.IPV6 != nil {
if swag.IsZero(m.IPV6) { // not required
return nil
}
if err := m.IPV6.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv6")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv6")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *IPAMResponse) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPAMResponse) UnmarshalBinary(b []byte) error {
var res IPAMResponse
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
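// Editorial illustration (not generated): IPAMResponse marks address and
// host-addressing as required, so Validate reports an error when either is
// nil. This hypothetical helper, exampleIPAMResponseRequiredCheck, sketches
// that behaviour; the field values are illustrative only.
func exampleIPAMResponseRequiredCheck() error {
	resp := &IPAMResponse{
		Address: &AddressPair{IPV4: "10.0.0.42", IPV6: "f00d::a00:0:0:2a"},
		// HostAddressing is intentionally left nil here.
	}
	// Expected to fail with a "host-addressing in body is required" style error.
	return resp.Validate(strfmt.Default)
}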
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IPAMStatus Status of IP address management
//
// +k8s:deepcopy-gen=true
//
// swagger:model IPAMStatus
type IPAMStatus struct {
// allocations
Allocations AllocationMap `json:"allocations,omitempty"`
// ipv4
IPV4 []string `json:"ipv4"`
// ipv6
IPV6 []string `json:"ipv6"`
// status
Status string `json:"status,omitempty"`
}
// Validate validates this IPAM status
func (m *IPAMStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateAllocations(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IPAMStatus) validateAllocations(formats strfmt.Registry) error {
if swag.IsZero(m.Allocations) { // not required
return nil
}
if m.Allocations != nil {
if err := m.Allocations.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("allocations")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("allocations")
}
return err
}
}
return nil
}
// ContextValidate validates this IPAM status based on the context it is used
func (m *IPAMStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateAllocations(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IPAMStatus) contextValidateAllocations(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.Allocations) { // not required
return nil
}
if err := m.Allocations.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("allocations")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("allocations")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *IPAMStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPAMStatus) UnmarshalBinary(b []byte) error {
var res IPAMStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// IPListEntry IP entry with metadata
//
// swagger:model IPListEntry
type IPListEntry struct {
// Key of the entry in the form of a CIDR range
// Required: true
Cidr *string `json:"cidr"`
// The context ID for the encryption session
EncryptKey int64 `json:"encryptKey,omitempty"`
// IP address of the host
HostIP string `json:"hostIP,omitempty"`
// Numerical identity assigned to the IP
// Required: true
Identity *int64 `json:"identity"`
// metadata
Metadata *IPListEntryMetadata `json:"metadata,omitempty"`
}
// Validate validates this IP list entry
func (m *IPListEntry) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateCidr(formats); err != nil {
res = append(res, err)
}
if err := m.validateIdentity(formats); err != nil {
res = append(res, err)
}
if err := m.validateMetadata(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IPListEntry) validateCidr(formats strfmt.Registry) error {
if err := validate.Required("cidr", "body", m.Cidr); err != nil {
return err
}
return nil
}
func (m *IPListEntry) validateIdentity(formats strfmt.Registry) error {
if err := validate.Required("identity", "body", m.Identity); err != nil {
return err
}
return nil
}
func (m *IPListEntry) validateMetadata(formats strfmt.Registry) error {
if swag.IsZero(m.Metadata) { // not required
return nil
}
if m.Metadata != nil {
if err := m.Metadata.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("metadata")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("metadata")
}
return err
}
}
return nil
}
// ContextValidate validates this IP list entry based on the context it is used
func (m *IPListEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateMetadata(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *IPListEntry) contextValidateMetadata(ctx context.Context, formats strfmt.Registry) error {
if m.Metadata != nil {
if swag.IsZero(m.Metadata) { // not required
return nil
}
if err := m.Metadata.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("metadata")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("metadata")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *IPListEntry) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPListEntry) UnmarshalBinary(b []byte) error {
var res IPListEntry
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
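// Editorial illustration (not generated): cidr and identity are required
// pointer fields, so the swag helpers are a convenient way to set them. The
// helper name exampleIPListEntryValidation and the field values are
// illustrative only.
func exampleIPListEntryValidation() error {
	entry := &IPListEntry{
		Cidr:     swag.String("10.16.0.0/24"),
		Identity: swag.Int64(16777217),
		HostIP:   "192.168.1.10",
		Metadata: &IPListEntryMetadata{
			Name:      "frontend-7d4b9",
			Namespace: "default",
			Source:    "k8s",
		},
	}
	if err := entry.Validate(strfmt.Default); err != nil {
		return err
	}
	return entry.ContextValidate(context.Background(), strfmt.Default)
}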
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IPListEntryMetadata Additional metadata assigned to an IP list entry
//
// swagger:model IPListEntryMetadata
type IPListEntryMetadata struct {
// Name assigned to the IP (e.g. Kubernetes pod name)
Name string `json:"name,omitempty"`
// Namespace of the IP (e.g. Kubernetes namespace)
Namespace string `json:"namespace,omitempty"`
// Source of the IP entry and its metadata
// Example: k8s
Source string `json:"source,omitempty"`
}
// Validate validates this IP list entry metadata
func (m *IPListEntryMetadata) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this IP list entry metadata based on the context it is used
func (m *IPListEntryMetadata) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *IPListEntryMetadata) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPListEntryMetadata) UnmarshalBinary(b []byte) error {
var res IPListEntryMetadata
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IPV4BigTCP Status of IPv4 BIG TCP
//
// swagger:model IPV4BigTCP
type IPV4BigTCP struct {
// Is IPv4 BIG TCP enabled
Enabled bool `json:"enabled,omitempty"`
// Maximum IPv4 GRO size
MaxGRO int64 `json:"maxGRO,omitempty"`
// Maximum IPv4 GSO size
MaxGSO int64 `json:"maxGSO,omitempty"`
}
// Validate validates this IPv4 BIG TCP status
func (m *IPV4BigTCP) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this IPv4 BIG TCP status based on the context it is used
func (m *IPV4BigTCP) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *IPV4BigTCP) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPV4BigTCP) UnmarshalBinary(b []byte) error {
var res IPV4BigTCP
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// IPV6BigTCP Status of IPv6 BIG TCP
//
// swagger:model IPV6BigTCP
type IPV6BigTCP struct {
// Is IPv6 BIG TCP enabled
Enabled bool `json:"enabled,omitempty"`
// Maximum IPv6 GRO size
MaxGRO int64 `json:"maxGRO,omitempty"`
// Maximum IPv6 GSO size
MaxGSO int64 `json:"maxGSO,omitempty"`
}
// Validate validates this IPv6 BIG TCP status
func (m *IPV6BigTCP) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this IPv6 BIG TCP status based on the context it is used
func (m *IPV6BigTCP) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *IPV6BigTCP) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *IPV6BigTCP) UnmarshalBinary(b []byte) error {
var res IPV6BigTCP
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// K8sStatus Status of Kubernetes integration
//
// +k8s:deepcopy-gen=true
//
// swagger:model K8sStatus
type K8sStatus struct {
// k8s api versions
K8sAPIVersions []string `json:"k8s-api-versions"`
// Human readable status/error/warning message
Msg string `json:"msg,omitempty"`
// State the component is in
// Enum: ["Ok","Warning","Failure","Disabled"]
State string `json:"state,omitempty"`
}
// Validate validates this k8s status
func (m *K8sStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var k8sStatusTypeStatePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Ok","Warning","Failure","Disabled"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
k8sStatusTypeStatePropEnum = append(k8sStatusTypeStatePropEnum, v)
}
}
const (
// K8sStatusStateOk captures enum value "Ok"
K8sStatusStateOk string = "Ok"
// K8sStatusStateWarning captures enum value "Warning"
K8sStatusStateWarning string = "Warning"
// K8sStatusStateFailure captures enum value "Failure"
K8sStatusStateFailure string = "Failure"
// K8sStatusStateDisabled captures enum value "Disabled"
K8sStatusStateDisabled string = "Disabled"
)
// prop value enum
func (m *K8sStatus) validateStateEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, k8sStatusTypeStatePropEnum, true); err != nil {
return err
}
return nil
}
func (m *K8sStatus) validateState(formats strfmt.Registry) error {
if swag.IsZero(m.State) { // not required
return nil
}
// value enum
if err := m.validateStateEnum("state", "body", m.State); err != nil {
return err
}
return nil
}
// ContextValidate validates this k8s status based on the context it is used
func (m *K8sStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *K8sStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *K8sStatus) UnmarshalBinary(b []byte) error {
var res K8sStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
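// Editorial illustration (not generated): the state field is restricted to
// the enum above, so an unknown value is rejected by Validate. The helper
// name exampleK8sStatusStateEnum and the sample values are hypothetical.
func exampleK8sStatusStateEnum() error {
	ok := &K8sStatus{State: K8sStatusStateOk, K8sAPIVersions: []string{"v1"}}
	if err := ok.Validate(strfmt.Default); err != nil {
		return err
	}
	// A value outside the enum, e.g. "Degraded", makes Validate return a
	// CompositeValidationError wrapping an enum violation for "state".
	bad := &K8sStatus{State: "Degraded"}
	if err := bad.Validate(strfmt.Default); err == nil {
		return errors.New(400, "expected enum validation to fail for state=Degraded")
	}
	return nil
}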
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// KVstoreConfiguration Configuration used for the kvstore
//
// swagger:model KVstoreConfiguration
type KVstoreConfiguration struct {
// Configuration options
Options map[string]string `json:"options,omitempty"`
// Type of kvstore
Type string `json:"type,omitempty"`
}
// Validate validates this kvstore configuration
func (m *KVstoreConfiguration) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kvstore configuration based on the context it is used
func (m *KVstoreConfiguration) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KVstoreConfiguration) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KVstoreConfiguration) UnmarshalBinary(b []byte) error {
var res KVstoreConfiguration
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
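// Editorial illustration (not generated): a sketch of a kvstore configuration
// payload. The type and option key/value shown here are illustrative only and
// not a statement of the options Cilium actually accepts; the helper name
// exampleKVstoreConfiguration is hypothetical.
func exampleKVstoreConfiguration() (*KVstoreConfiguration, error) {
	cfg := &KVstoreConfiguration{
		Type:    "etcd",
		Options: map[string]string{"etcd.config": "/var/lib/etcd-config/etcd.config"},
	}
	if err := cfg.Validate(strfmt.Default); err != nil {
		return nil, err
	}
	return cfg, nil
}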
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// KubeProxyReplacement Status of kube-proxy replacement
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacement
type KubeProxyReplacement struct {
//
//
// +k8s:deepcopy-gen=true
DeviceList []*KubeProxyReplacementDeviceListItems0 `json:"deviceList"`
// devices
Devices []string `json:"devices"`
// direct routing device
DirectRoutingDevice string `json:"directRoutingDevice,omitempty"`
// features
Features *KubeProxyReplacementFeatures `json:"features,omitempty"`
// mode
// Enum: ["True","False"]
Mode string `json:"mode,omitempty"`
}
// Validate validates this kube proxy replacement
func (m *KubeProxyReplacement) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateDeviceList(formats); err != nil {
res = append(res, err)
}
if err := m.validateFeatures(formats); err != nil {
res = append(res, err)
}
if err := m.validateMode(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *KubeProxyReplacement) validateDeviceList(formats strfmt.Registry) error {
if swag.IsZero(m.DeviceList) { // not required
return nil
}
for i := 0; i < len(m.DeviceList); i++ {
if swag.IsZero(m.DeviceList[i]) { // not required
continue
}
if m.DeviceList[i] != nil {
if err := m.DeviceList[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("deviceList" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("deviceList" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *KubeProxyReplacement) validateFeatures(formats strfmt.Registry) error {
if swag.IsZero(m.Features) { // not required
return nil
}
if m.Features != nil {
if err := m.Features.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features")
}
return err
}
}
return nil
}
var kubeProxyReplacementTypeModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["True","False"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
kubeProxyReplacementTypeModePropEnum = append(kubeProxyReplacementTypeModePropEnum, v)
}
}
const (
// KubeProxyReplacementModeTrue captures enum value "True"
KubeProxyReplacementModeTrue string = "True"
// KubeProxyReplacementModeFalse captures enum value "False"
KubeProxyReplacementModeFalse string = "False"
)
// prop value enum
func (m *KubeProxyReplacement) validateModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, kubeProxyReplacementTypeModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *KubeProxyReplacement) validateMode(formats strfmt.Registry) error {
if swag.IsZero(m.Mode) { // not required
return nil
}
// value enum
if err := m.validateModeEnum("mode", "body", m.Mode); err != nil {
return err
}
return nil
}
// ContextValidate validates this kube proxy replacement based on the context it is used
func (m *KubeProxyReplacement) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateDeviceList(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateFeatures(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *KubeProxyReplacement) contextValidateDeviceList(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.DeviceList); i++ {
if m.DeviceList[i] != nil {
if swag.IsZero(m.DeviceList[i]) { // not required
return nil
}
if err := m.DeviceList[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("deviceList" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("deviceList" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *KubeProxyReplacement) contextValidateFeatures(ctx context.Context, formats strfmt.Registry) error {
if m.Features != nil {
if swag.IsZero(m.Features) { // not required
return nil
}
if err := m.Features.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacement) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacement) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacement
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
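// Editorial illustration (not generated): a sketch of assembling a
// KubeProxyReplacement value with its nested device list and the generated
// mode enum, then validating it. The helper name
// exampleKubeProxyReplacementValidation and the device names/addresses are
// illustrative only.
func exampleKubeProxyReplacementValidation() error {
	kpr := &KubeProxyReplacement{
		Mode:                KubeProxyReplacementModeTrue, // enum: "True" or "False"
		DirectRoutingDevice: "eth0",
		Devices:             []string{"eth0", "eth1"},
		DeviceList: []*KubeProxyReplacementDeviceListItems0{
			{Name: "eth0", IP: []string{"192.0.2.10"}},
			{Name: "eth1", IP: []string{"192.0.2.11"}},
		},
	}
	if err := kpr.Validate(strfmt.Default); err != nil {
		return err
	}
	return kpr.ContextValidate(context.Background(), strfmt.Default)
}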
// KubeProxyReplacementDeviceListItems0
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementDeviceListItems0
type KubeProxyReplacementDeviceListItems0 struct {
//
//
// +k8s:deepcopy-gen=true
IP []string `json:"ip"`
// name
Name string `json:"name,omitempty"`
}
// Validate validates this kube proxy replacement device list items0
func (m *KubeProxyReplacementDeviceListItems0) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement device list items0 based on the context it is used
func (m *KubeProxyReplacementDeviceListItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementDeviceListItems0) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementDeviceListItems0) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementDeviceListItems0
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeatures
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementFeatures
type KubeProxyReplacementFeatures struct {
// annotations
Annotations []string `json:"annotations"`
// flag bpf-lb-sock-hostns-only
BpfSocketLBHostnsOnly bool `json:"bpfSocketLBHostnsOnly,omitempty"`
// external IPs
ExternalIPs *KubeProxyReplacementFeaturesExternalIPs `json:"externalIPs,omitempty"`
// graceful termination
GracefulTermination *KubeProxyReplacementFeaturesGracefulTermination `json:"gracefulTermination,omitempty"`
// host port
HostPort *KubeProxyReplacementFeaturesHostPort `json:"hostPort,omitempty"`
// host reachable services
HostReachableServices *KubeProxyReplacementFeaturesHostReachableServices `json:"hostReachableServices,omitempty"`
// nat46 x64
Nat46X64 *KubeProxyReplacementFeaturesNat46X64 `json:"nat46X64,omitempty"`
// node port
NodePort *KubeProxyReplacementFeaturesNodePort `json:"nodePort,omitempty"`
// session affinity
SessionAffinity *KubeProxyReplacementFeaturesSessionAffinity `json:"sessionAffinity,omitempty"`
// socket LB
SocketLB *KubeProxyReplacementFeaturesSocketLB `json:"socketLB,omitempty"`
// socket LB tracing
SocketLBTracing *KubeProxyReplacementFeaturesSocketLBTracing `json:"socketLBTracing,omitempty"`
}
// Validate validates this kube proxy replacement features
func (m *KubeProxyReplacementFeatures) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateExternalIPs(formats); err != nil {
res = append(res, err)
}
if err := m.validateGracefulTermination(formats); err != nil {
res = append(res, err)
}
if err := m.validateHostPort(formats); err != nil {
res = append(res, err)
}
if err := m.validateHostReachableServices(formats); err != nil {
res = append(res, err)
}
if err := m.validateNat46X64(formats); err != nil {
res = append(res, err)
}
if err := m.validateNodePort(formats); err != nil {
res = append(res, err)
}
if err := m.validateSessionAffinity(formats); err != nil {
res = append(res, err)
}
if err := m.validateSocketLB(formats); err != nil {
res = append(res, err)
}
if err := m.validateSocketLBTracing(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateExternalIPs(formats strfmt.Registry) error {
if swag.IsZero(m.ExternalIPs) { // not required
return nil
}
if m.ExternalIPs != nil {
if err := m.ExternalIPs.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "externalIPs")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "externalIPs")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateGracefulTermination(formats strfmt.Registry) error {
if swag.IsZero(m.GracefulTermination) { // not required
return nil
}
if m.GracefulTermination != nil {
if err := m.GracefulTermination.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "gracefulTermination")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "gracefulTermination")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateHostPort(formats strfmt.Registry) error {
if swag.IsZero(m.HostPort) { // not required
return nil
}
if m.HostPort != nil {
if err := m.HostPort.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "hostPort")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "hostPort")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateHostReachableServices(formats strfmt.Registry) error {
if swag.IsZero(m.HostReachableServices) { // not required
return nil
}
if m.HostReachableServices != nil {
if err := m.HostReachableServices.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "hostReachableServices")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "hostReachableServices")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateNat46X64(formats strfmt.Registry) error {
if swag.IsZero(m.Nat46X64) { // not required
return nil
}
if m.Nat46X64 != nil {
if err := m.Nat46X64.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nat46X64")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nat46X64")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateNodePort(formats strfmt.Registry) error {
if swag.IsZero(m.NodePort) { // not required
return nil
}
if m.NodePort != nil {
if err := m.NodePort.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nodePort")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nodePort")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateSessionAffinity(formats strfmt.Registry) error {
if swag.IsZero(m.SessionAffinity) { // not required
return nil
}
if m.SessionAffinity != nil {
if err := m.SessionAffinity.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "sessionAffinity")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "sessionAffinity")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateSocketLB(formats strfmt.Registry) error {
if swag.IsZero(m.SocketLB) { // not required
return nil
}
if m.SocketLB != nil {
if err := m.SocketLB.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "socketLB")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "socketLB")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) validateSocketLBTracing(formats strfmt.Registry) error {
if swag.IsZero(m.SocketLBTracing) { // not required
return nil
}
if m.SocketLBTracing != nil {
if err := m.SocketLBTracing.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "socketLBTracing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "socketLBTracing")
}
return err
}
}
return nil
}
// ContextValidate validates this kube proxy replacement features based on the context it is used
func (m *KubeProxyReplacementFeatures) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateExternalIPs(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateGracefulTermination(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateHostPort(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateHostReachableServices(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNat46X64(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNodePort(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSessionAffinity(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSocketLB(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSocketLBTracing(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateExternalIPs(ctx context.Context, formats strfmt.Registry) error {
if m.ExternalIPs != nil {
if swag.IsZero(m.ExternalIPs) { // not required
return nil
}
if err := m.ExternalIPs.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "externalIPs")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "externalIPs")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateGracefulTermination(ctx context.Context, formats strfmt.Registry) error {
if m.GracefulTermination != nil {
if swag.IsZero(m.GracefulTermination) { // not required
return nil
}
if err := m.GracefulTermination.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "gracefulTermination")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "gracefulTermination")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateHostPort(ctx context.Context, formats strfmt.Registry) error {
if m.HostPort != nil {
if swag.IsZero(m.HostPort) { // not required
return nil
}
if err := m.HostPort.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "hostPort")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "hostPort")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateHostReachableServices(ctx context.Context, formats strfmt.Registry) error {
if m.HostReachableServices != nil {
if swag.IsZero(m.HostReachableServices) { // not required
return nil
}
if err := m.HostReachableServices.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "hostReachableServices")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "hostReachableServices")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateNat46X64(ctx context.Context, formats strfmt.Registry) error {
if m.Nat46X64 != nil {
if swag.IsZero(m.Nat46X64) { // not required
return nil
}
if err := m.Nat46X64.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nat46X64")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nat46X64")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateNodePort(ctx context.Context, formats strfmt.Registry) error {
if m.NodePort != nil {
if swag.IsZero(m.NodePort) { // not required
return nil
}
if err := m.NodePort.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nodePort")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nodePort")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateSessionAffinity(ctx context.Context, formats strfmt.Registry) error {
if m.SessionAffinity != nil {
if swag.IsZero(m.SessionAffinity) { // not required
return nil
}
if err := m.SessionAffinity.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "sessionAffinity")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "sessionAffinity")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateSocketLB(ctx context.Context, formats strfmt.Registry) error {
if m.SocketLB != nil {
if swag.IsZero(m.SocketLB) { // not required
return nil
}
if err := m.SocketLB.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "socketLB")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "socketLB")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeatures) contextValidateSocketLBTracing(ctx context.Context, formats strfmt.Registry) error {
if m.SocketLBTracing != nil {
if swag.IsZero(m.SocketLBTracing) { // not required
return nil
}
if err := m.SocketLBTracing.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "socketLBTracing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "socketLBTracing")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeatures) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeatures) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeatures
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesExternalIPs kube proxy replacement features external IPs
//
// swagger:model KubeProxyReplacementFeaturesExternalIPs
type KubeProxyReplacementFeaturesExternalIPs struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
}
// Validate validates this kube proxy replacement features external IPs
func (m *KubeProxyReplacementFeaturesExternalIPs) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features external IPs based on the context it is used
func (m *KubeProxyReplacementFeaturesExternalIPs) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesExternalIPs) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesExternalIPs) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesExternalIPs
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesGracefulTermination kube proxy replacement features graceful termination
//
// swagger:model KubeProxyReplacementFeaturesGracefulTermination
type KubeProxyReplacementFeaturesGracefulTermination struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
}
// Validate validates this kube proxy replacement features graceful termination
func (m *KubeProxyReplacementFeaturesGracefulTermination) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features graceful termination based on the context it is used
func (m *KubeProxyReplacementFeaturesGracefulTermination) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesGracefulTermination) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesGracefulTermination) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesGracefulTermination
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesHostPort kube proxy replacement features host port
//
// swagger:model KubeProxyReplacementFeaturesHostPort
type KubeProxyReplacementFeaturesHostPort struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
}
// Validate validates this kube proxy replacement features host port
func (m *KubeProxyReplacementFeaturesHostPort) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features host port based on context it is used
func (m *KubeProxyReplacementFeaturesHostPort) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesHostPort) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesHostPort) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesHostPort
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesHostReachableServices
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementFeaturesHostReachableServices
type KubeProxyReplacementFeaturesHostReachableServices struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
// protocols
Protocols []string `json:"protocols"`
}
// Validate validates this kube proxy replacement features host reachable services
func (m *KubeProxyReplacementFeaturesHostReachableServices) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features host reachable services based on context it is used
func (m *KubeProxyReplacementFeaturesHostReachableServices) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesHostReachableServices) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesHostReachableServices) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesHostReachableServices
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesNat46X64
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementFeaturesNat46X64
type KubeProxyReplacementFeaturesNat46X64 struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
// gateway
Gateway *KubeProxyReplacementFeaturesNat46X64Gateway `json:"gateway,omitempty"`
// service
Service *KubeProxyReplacementFeaturesNat46X64Service `json:"service,omitempty"`
}
// Validate validates this kube proxy replacement features nat46 x64
func (m *KubeProxyReplacementFeaturesNat46X64) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateGateway(formats); err != nil {
res = append(res, err)
}
if err := m.validateService(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *KubeProxyReplacementFeaturesNat46X64) validateGateway(formats strfmt.Registry) error {
if swag.IsZero(m.Gateway) { // not required
return nil
}
if m.Gateway != nil {
if err := m.Gateway.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nat46X64" + "." + "gateway")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nat46X64" + "." + "gateway")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeaturesNat46X64) validateService(formats strfmt.Registry) error {
if swag.IsZero(m.Service) { // not required
return nil
}
if m.Service != nil {
if err := m.Service.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nat46X64" + "." + "service")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nat46X64" + "." + "service")
}
return err
}
}
return nil
}
// ContextValidate validate this kube proxy replacement features nat46 x64 based on the context it is used
func (m *KubeProxyReplacementFeaturesNat46X64) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateGateway(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateService(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *KubeProxyReplacementFeaturesNat46X64) contextValidateGateway(ctx context.Context, formats strfmt.Registry) error {
if m.Gateway != nil {
if swag.IsZero(m.Gateway) { // not required
return nil
}
if err := m.Gateway.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nat46X64" + "." + "gateway")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nat46X64" + "." + "gateway")
}
return err
}
}
return nil
}
func (m *KubeProxyReplacementFeaturesNat46X64) contextValidateService(ctx context.Context, formats strfmt.Registry) error {
if m.Service != nil {
if swag.IsZero(m.Service) { // not required
return nil
}
if err := m.Service.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("features" + "." + "nat46X64" + "." + "service")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("features" + "." + "nat46X64" + "." + "service")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesNat46X64) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesNat46X64) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesNat46X64
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesNat46X64Gateway
//
// +k8s:deepcopy-gen=true
//
// swagger:model KubeProxyReplacementFeaturesNat46X64Gateway
type KubeProxyReplacementFeaturesNat46X64Gateway struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
// prefixes
Prefixes []string `json:"prefixes"`
}
// Validate validates this kube proxy replacement features nat46 x64 gateway
func (m *KubeProxyReplacementFeaturesNat46X64Gateway) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features nat46 x64 gateway based on context it is used
func (m *KubeProxyReplacementFeaturesNat46X64Gateway) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesNat46X64Gateway) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesNat46X64Gateway) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesNat46X64Gateway
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesNat46X64Service kube proxy replacement features nat46 x64 service
//
// swagger:model KubeProxyReplacementFeaturesNat46X64Service
type KubeProxyReplacementFeaturesNat46X64Service struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
}
// Validate validates this kube proxy replacement features nat46 x64 service
func (m *KubeProxyReplacementFeaturesNat46X64Service) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features nat46 x64 service based on context it is used
func (m *KubeProxyReplacementFeaturesNat46X64Service) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesNat46X64Service) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesNat46X64Service) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesNat46X64Service
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesNodePort kube proxy replacement features node port
//
// swagger:model KubeProxyReplacementFeaturesNodePort
type KubeProxyReplacementFeaturesNodePort struct {
// acceleration
// Enum: ["None","Native","Generic","Best-Effort"]
Acceleration string `json:"acceleration,omitempty"`
// algorithm
// Enum: ["Random","Maglev"]
Algorithm string `json:"algorithm,omitempty"`
// dsr mode
// Enum: ["IP Option/Extension","IPIP","Geneve"]
DsrMode string `json:"dsrMode,omitempty"`
// enabled
Enabled bool `json:"enabled,omitempty"`
// lut size
LutSize int64 `json:"lutSize,omitempty"`
// mode
// Enum: ["SNAT","DSR","Hybrid"]
Mode string `json:"mode,omitempty"`
// port max
PortMax int64 `json:"portMax,omitempty"`
// port min
PortMin int64 `json:"portMin,omitempty"`
}
// Validate validates this kube proxy replacement features node port
func (m *KubeProxyReplacementFeaturesNodePort) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateAcceleration(formats); err != nil {
res = append(res, err)
}
if err := m.validateAlgorithm(formats); err != nil {
res = append(res, err)
}
if err := m.validateDsrMode(formats); err != nil {
res = append(res, err)
}
if err := m.validateMode(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var kubeProxyReplacementFeaturesNodePortTypeAccelerationPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["None","Native","Generic","Best-Effort"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
kubeProxyReplacementFeaturesNodePortTypeAccelerationPropEnum = append(kubeProxyReplacementFeaturesNodePortTypeAccelerationPropEnum, v)
}
}
const (
// KubeProxyReplacementFeaturesNodePortAccelerationNone captures enum value "None"
KubeProxyReplacementFeaturesNodePortAccelerationNone string = "None"
// KubeProxyReplacementFeaturesNodePortAccelerationNative captures enum value "Native"
KubeProxyReplacementFeaturesNodePortAccelerationNative string = "Native"
// KubeProxyReplacementFeaturesNodePortAccelerationGeneric captures enum value "Generic"
KubeProxyReplacementFeaturesNodePortAccelerationGeneric string = "Generic"
// KubeProxyReplacementFeaturesNodePortAccelerationBestDashEffort captures enum value "Best-Effort"
KubeProxyReplacementFeaturesNodePortAccelerationBestDashEffort string = "Best-Effort"
)
// prop value enum
func (m *KubeProxyReplacementFeaturesNodePort) validateAccelerationEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, kubeProxyReplacementFeaturesNodePortTypeAccelerationPropEnum, true); err != nil {
return err
}
return nil
}
func (m *KubeProxyReplacementFeaturesNodePort) validateAcceleration(formats strfmt.Registry) error {
if swag.IsZero(m.Acceleration) { // not required
return nil
}
// value enum
if err := m.validateAccelerationEnum("features"+"."+"nodePort"+"."+"acceleration", "body", m.Acceleration); err != nil {
return err
}
return nil
}
var kubeProxyReplacementFeaturesNodePortTypeAlgorithmPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Random","Maglev"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
kubeProxyReplacementFeaturesNodePortTypeAlgorithmPropEnum = append(kubeProxyReplacementFeaturesNodePortTypeAlgorithmPropEnum, v)
}
}
const (
// KubeProxyReplacementFeaturesNodePortAlgorithmRandom captures enum value "Random"
KubeProxyReplacementFeaturesNodePortAlgorithmRandom string = "Random"
// KubeProxyReplacementFeaturesNodePortAlgorithmMaglev captures enum value "Maglev"
KubeProxyReplacementFeaturesNodePortAlgorithmMaglev string = "Maglev"
)
// prop value enum
func (m *KubeProxyReplacementFeaturesNodePort) validateAlgorithmEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, kubeProxyReplacementFeaturesNodePortTypeAlgorithmPropEnum, true); err != nil {
return err
}
return nil
}
func (m *KubeProxyReplacementFeaturesNodePort) validateAlgorithm(formats strfmt.Registry) error {
if swag.IsZero(m.Algorithm) { // not required
return nil
}
// value enum
if err := m.validateAlgorithmEnum("features"+"."+"nodePort"+"."+"algorithm", "body", m.Algorithm); err != nil {
return err
}
return nil
}
var kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["IP Option/Extension","IPIP","Geneve"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum = append(kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum, v)
}
}
const (
// KubeProxyReplacementFeaturesNodePortDsrModeIPOptionExtension captures enum value "IP Option/Extension"
KubeProxyReplacementFeaturesNodePortDsrModeIPOptionExtension string = "IP Option/Extension"
// KubeProxyReplacementFeaturesNodePortDsrModeIPIP captures enum value "IPIP"
KubeProxyReplacementFeaturesNodePortDsrModeIPIP string = "IPIP"
// KubeProxyReplacementFeaturesNodePortDsrModeGeneve captures enum value "Geneve"
KubeProxyReplacementFeaturesNodePortDsrModeGeneve string = "Geneve"
)
// prop value enum
func (m *KubeProxyReplacementFeaturesNodePort) validateDsrModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *KubeProxyReplacementFeaturesNodePort) validateDsrMode(formats strfmt.Registry) error {
if swag.IsZero(m.DsrMode) { // not required
return nil
}
// value enum
if err := m.validateDsrModeEnum("features"+"."+"nodePort"+"."+"dsrMode", "body", m.DsrMode); err != nil {
return err
}
return nil
}
var kubeProxyReplacementFeaturesNodePortTypeModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["SNAT","DSR","Hybrid"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
kubeProxyReplacementFeaturesNodePortTypeModePropEnum = append(kubeProxyReplacementFeaturesNodePortTypeModePropEnum, v)
}
}
const (
// KubeProxyReplacementFeaturesNodePortModeSNAT captures enum value "SNAT"
KubeProxyReplacementFeaturesNodePortModeSNAT string = "SNAT"
// KubeProxyReplacementFeaturesNodePortModeDSR captures enum value "DSR"
KubeProxyReplacementFeaturesNodePortModeDSR string = "DSR"
// KubeProxyReplacementFeaturesNodePortModeHybrid captures enum value "Hybrid"
KubeProxyReplacementFeaturesNodePortModeHybrid string = "Hybrid"
)
// prop value enum
func (m *KubeProxyReplacementFeaturesNodePort) validateModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, kubeProxyReplacementFeaturesNodePortTypeModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *KubeProxyReplacementFeaturesNodePort) validateMode(formats strfmt.Registry) error {
if swag.IsZero(m.Mode) { // not required
return nil
}
// value enum
if err := m.validateModeEnum("features"+"."+"nodePort"+"."+"mode", "body", m.Mode); err != nil {
return err
}
return nil
}
// ContextValidate validates this kube proxy replacement features node port based on context it is used
func (m *KubeProxyReplacementFeaturesNodePort) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesNodePort) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesNodePort) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesNodePort
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
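// Illustrative sketch (not produced by the swagger generator): exercising the
// enum checks on KubeProxyReplacementFeaturesNodePort. The field values are
// hypothetical; Validate accepts only the generated acceleration, algorithm,
// dsrMode and mode enum values, so e.g. an acceleration of "XDP" would be
// rejected with a composite validation error.
func exampleValidateNodePortFeatures() error {
    np := &KubeProxyReplacementFeaturesNodePort{
        Enabled:      true,
        Acceleration: KubeProxyReplacementFeaturesNodePortAccelerationNative,
        Algorithm:    KubeProxyReplacementFeaturesNodePortAlgorithmMaglev,
        Mode:         KubeProxyReplacementFeaturesNodePortModeSNAT,
        PortMin:      30000,
        PortMax:      32767,
    }
    return np.Validate(strfmt.Default)
}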
// KubeProxyReplacementFeaturesSessionAffinity kube proxy replacement features session affinity
//
// swagger:model KubeProxyReplacementFeaturesSessionAffinity
type KubeProxyReplacementFeaturesSessionAffinity struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
}
// Validate validates this kube proxy replacement features session affinity
func (m *KubeProxyReplacementFeaturesSessionAffinity) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features session affinity based on context it is used
func (m *KubeProxyReplacementFeaturesSessionAffinity) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesSessionAffinity) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesSessionAffinity) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesSessionAffinity
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesSocketLB kube proxy replacement features socket LB
//
// swagger:model KubeProxyReplacementFeaturesSocketLB
type KubeProxyReplacementFeaturesSocketLB struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
}
// Validate validates this kube proxy replacement features socket LB
func (m *KubeProxyReplacementFeaturesSocketLB) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features socket LB based on context it is used
func (m *KubeProxyReplacementFeaturesSocketLB) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesSocketLB) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesSocketLB) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesSocketLB
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// KubeProxyReplacementFeaturesSocketLBTracing kube proxy replacement features socket LB tracing
//
// swagger:model KubeProxyReplacementFeaturesSocketLBTracing
type KubeProxyReplacementFeaturesSocketLBTracing struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
}
// Validate validates this kube proxy replacement features socket LB tracing
func (m *KubeProxyReplacementFeaturesSocketLBTracing) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this kube proxy replacement features socket LB tracing based on context it is used
func (m *KubeProxyReplacementFeaturesSocketLBTracing) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesSocketLBTracing) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *KubeProxyReplacementFeaturesSocketLBTracing) UnmarshalBinary(b []byte) error {
var res KubeProxyReplacementFeaturesSocketLBTracing
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// L4Policy L4 endpoint policy
//
// swagger:model L4Policy
type L4Policy struct {
// List of L4 egress rules
Egress []*PolicyRule `json:"egress"`
// List of L4 ingress rules
Ingress []*PolicyRule `json:"ingress"`
}
// Validate validates this l4 policy
func (m *L4Policy) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateEgress(formats); err != nil {
res = append(res, err)
}
if err := m.validateIngress(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *L4Policy) validateEgress(formats strfmt.Registry) error {
if swag.IsZero(m.Egress) { // not required
return nil
}
for i := 0; i < len(m.Egress); i++ {
if swag.IsZero(m.Egress[i]) { // not required
continue
}
if m.Egress[i] != nil {
if err := m.Egress[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("egress" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("egress" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *L4Policy) validateIngress(formats strfmt.Registry) error {
if swag.IsZero(m.Ingress) { // not required
return nil
}
for i := 0; i < len(m.Ingress); i++ {
if swag.IsZero(m.Ingress[i]) { // not required
continue
}
if m.Ingress[i] != nil {
if err := m.Ingress[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ingress" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ingress" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this l4 policy based on the context it is used
func (m *L4Policy) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateEgress(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIngress(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *L4Policy) contextValidateEgress(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Egress); i++ {
if m.Egress[i] != nil {
if swag.IsZero(m.Egress[i]) { // not required
continue
}
if err := m.Egress[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("egress" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("egress" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *L4Policy) contextValidateIngress(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Ingress); i++ {
if m.Ingress[i] != nil {
if swag.IsZero(m.Ingress[i]) { // not required
continue
}
if err := m.Ingress[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ingress" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ingress" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *L4Policy) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *L4Policy) UnmarshalBinary(b []byte) error {
var res L4Policy
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
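// Illustrative sketch (not generated): decoding an L4Policy from its JSON wire
// form and validating the nested rules. The payload contents are assumed to
// come from the API; a failing rule is reported under "egress.<i>" or
// "ingress.<i>" through the strconv.Itoa(i) name prefixes used above.
func exampleDecodeL4Policy(raw []byte) (*L4Policy, error) {
    var pol L4Policy
    if err := pol.UnmarshalBinary(raw); err != nil {
        return nil, err
    }
    if err := pol.Validate(strfmt.Default); err != nil {
        return nil, err
    }
    return &pol, nil
}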
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// LRPBackend Pod backend of an LRP
//
// swagger:model LRPBackend
type LRPBackend struct {
// backend address
BackendAddress *BackendAddress `json:"backend-address,omitempty"`
// Namespace and name of the backend pod
PodID string `json:"pod-id,omitempty"`
}
// Validate validates this LRP backend
func (m *LRPBackend) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateBackendAddress(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *LRPBackend) validateBackendAddress(formats strfmt.Registry) error {
if swag.IsZero(m.BackendAddress) { // not required
return nil
}
if m.BackendAddress != nil {
if err := m.BackendAddress.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("backend-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("backend-address")
}
return err
}
}
return nil
}
// ContextValidate validate this LRP backend based on the context it is used
func (m *LRPBackend) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateBackendAddress(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *LRPBackend) contextValidateBackendAddress(ctx context.Context, formats strfmt.Registry) error {
if m.BackendAddress != nil {
if swag.IsZero(m.BackendAddress) { // not required
return nil
}
if err := m.BackendAddress.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("backend-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("backend-address")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *LRPBackend) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *LRPBackend) UnmarshalBinary(b []byte) error {
var res LRPBackend
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
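// Illustrative sketch (not generated): a minimal LRPBackend round trip through
// the swag-backed binary (JSON) encoding used by MarshalBinary/UnmarshalBinary.
// The pod identifier is hypothetical.
func exampleLRPBackendRoundTrip() (*LRPBackend, error) {
    in := &LRPBackend{PodID: "default/my-pod"}
    b, err := in.MarshalBinary()
    if err != nil {
        return nil, err
    }
    var out LRPBackend
    if err := out.UnmarshalBinary(b); err != nil {
        return nil, err
    }
    return &out, nil
}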
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// LRPSpec Configuration of an LRP
//
// swagger:model LRPSpec
type LRPSpec struct {
// mapping of frontends to pod backends
FrontendMappings []*FrontendMapping `json:"frontend-mappings"`
// LRP frontend type
FrontendType string `json:"frontend-type,omitempty"`
// LRP config type
LrpType string `json:"lrp-type,omitempty"`
// LRP service name
Name string `json:"name,omitempty"`
// LRP service namespace
Namespace string `json:"namespace,omitempty"`
// matching k8s service namespace and name
ServiceID string `json:"service-id,omitempty"`
// Unique identification
UID string `json:"uid,omitempty"`
}
// Validate validates this LRP spec
func (m *LRPSpec) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateFrontendMappings(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *LRPSpec) validateFrontendMappings(formats strfmt.Registry) error {
if swag.IsZero(m.FrontendMappings) { // not required
return nil
}
for i := 0; i < len(m.FrontendMappings); i++ {
if swag.IsZero(m.FrontendMappings[i]) { // not required
continue
}
if m.FrontendMappings[i] != nil {
if err := m.FrontendMappings[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("frontend-mappings" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("frontend-mappings" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this LRP spec based on the context it is used
func (m *LRPSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateFrontendMappings(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *LRPSpec) contextValidateFrontendMappings(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.FrontendMappings); i++ {
if m.FrontendMappings[i] != nil {
if swag.IsZero(m.FrontendMappings[i]) { // not required
continue
}
if err := m.FrontendMappings[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("frontend-mappings" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("frontend-mappings" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *LRPSpec) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *LRPSpec) UnmarshalBinary(b []byte) error {
var res LRPSpec
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
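// Illustrative sketch (not generated): the skeleton of an LRPSpec as the agent
// might report it. All identifiers are hypothetical; when frontend mappings
// are present, Validate reports a failing entry by its slice index under
// "frontend-mappings.<i>".
func exampleLRPSpec() error {
    spec := &LRPSpec{
        Name:      "redirect-dns",
        Namespace: "kube-system",
        LrpType:   "service",
        UID:       "0000-0000-0000",
    }
    return spec.Validate(strfmt.Default)
}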
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Label Label is Cilium's representation of a container label
//
// swagger:model Label
type Label struct {
// key
Key string `json:"key,omitempty"`
// Source can be one of the above values (e.g. LabelSourceContainer)
Source string `json:"source,omitempty"`
// value
Value string `json:"value,omitempty"`
}
// Validate validates this label
func (m *Label) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this label based on context it is used
func (m *Label) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *Label) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Label) UnmarshalBinary(b []byte) error {
var res Label
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
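// Illustrative sketch (not generated): building a Label by hand and encoding it
// with the same swag JSON helper that backs MarshalBinary. The key, value and
// source are hypothetical (the source would normally be one of the LabelSource*
// values mentioned above).
func exampleEncodeLabel() ([]byte, error) {
    lbl := &Label{
        Key:    "app",
        Value:  "frontend",
        Source: "k8s",
    }
    return swag.WriteJSON(lbl)
}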
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// LabelArray LabelArray is an array of labels forming a set
//
// swagger:model LabelArray
type LabelArray []*Label
// Validate validates this label array
func (m LabelArray) Validate(formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if swag.IsZero(m[i]) { // not required
continue
}
if m[i] != nil {
if err := m[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validate this label array based on the context it is used
func (m LabelArray) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if m[i] != nil {
if swag.IsZero(m[i]) { // not required
continue
}
if err := m[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
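// Illustrative sketch (not generated): validating a LabelArray. An invalid
// entry is reported by its slice index via strconv.Itoa(i) in the methods
// above. The labels themselves are hypothetical.
func exampleValidateLabelArray() error {
    arr := LabelArray{
        {Key: "app", Value: "frontend", Source: "k8s"},
        {Key: "io.kubernetes.pod.namespace", Value: "default", Source: "k8s"},
    }
    return arr.Validate(strfmt.Default)
}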
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// LabelConfiguration Label configuration of an endpoint
//
// swagger:model LabelConfiguration
type LabelConfiguration struct {
// The user provided desired configuration
Spec *LabelConfigurationSpec `json:"spec,omitempty"`
// The current configuration
Status *LabelConfigurationStatus `json:"status,omitempty"`
}
// Validate validates this label configuration
func (m *LabelConfiguration) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateSpec(formats); err != nil {
res = append(res, err)
}
if err := m.validateStatus(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *LabelConfiguration) validateSpec(formats strfmt.Registry) error {
if swag.IsZero(m.Spec) { // not required
return nil
}
if m.Spec != nil {
if err := m.Spec.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *LabelConfiguration) validateStatus(formats strfmt.Registry) error {
if swag.IsZero(m.Status) { // not required
return nil
}
if m.Status != nil {
if err := m.Status.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// ContextValidate validate this label configuration based on the context it is used
func (m *LabelConfiguration) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateSpec(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateStatus(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *LabelConfiguration) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error {
if m.Spec != nil {
if swag.IsZero(m.Spec) { // not required
return nil
}
if err := m.Spec.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *LabelConfiguration) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
if m.Status != nil {
if swag.IsZero(m.Status) { // not required
return nil
}
if err := m.Status.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *LabelConfiguration) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *LabelConfiguration) UnmarshalBinary(b []byte) error {
var res LabelConfiguration
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
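// Illustrative sketch (not generated): a LabelConfiguration carrying user
// provided labels in its spec. The label string is hypothetical; Validate
// descends into the nested spec and status exactly as wired up above.
func exampleValidateLabelConfiguration() error {
    cfg := &LabelConfiguration{
        Spec: &LabelConfigurationSpec{
            User: Labels{"k8s:app=frontend"},
        },
    }
    return cfg.Validate(strfmt.Default)
}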
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// LabelConfigurationSpec User desired Label configuration of an endpoint
//
// swagger:model LabelConfigurationSpec
type LabelConfigurationSpec struct {
// Custom labels in addition to orchestration system labels.
User Labels `json:"user,omitempty"`
}
// Validate validates this label configuration spec
func (m *LabelConfigurationSpec) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateUser(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *LabelConfigurationSpec) validateUser(formats strfmt.Registry) error {
if swag.IsZero(m.User) { // not required
return nil
}
if err := m.User.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("user")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("user")
}
return err
}
return nil
}
// ContextValidate validate this label configuration spec based on the context it is used
func (m *LabelConfigurationSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateUser(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *LabelConfigurationSpec) contextValidateUser(ctx context.Context, formats strfmt.Registry) error {
if err := m.User.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("user")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("user")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *LabelConfigurationSpec) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *LabelConfigurationSpec) UnmarshalBinary(b []byte) error {
var res LabelConfigurationSpec
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// LabelConfigurationStatus Labels and label configuration of an endpoint
//
// swagger:model LabelConfigurationStatus
type LabelConfigurationStatus struct {
// All labels derived from the orchestration system
Derived Labels `json:"derived,omitempty"`
// Labels derived from orchestration system which have been disabled.
Disabled Labels `json:"disabled,omitempty"`
// The current configuration
Realized *LabelConfigurationSpec `json:"realized,omitempty"`
// Labels derived from orchestration system that are used in computing a security identity
SecurityRelevant Labels `json:"security-relevant,omitempty"`
}
// Validate validates this label configuration status
func (m *LabelConfigurationStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateDerived(formats); err != nil {
res = append(res, err)
}
if err := m.validateDisabled(formats); err != nil {
res = append(res, err)
}
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if err := m.validateSecurityRelevant(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *LabelConfigurationStatus) validateDerived(formats strfmt.Registry) error {
if swag.IsZero(m.Derived) { // not required
return nil
}
if err := m.Derived.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("derived")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("derived")
}
return err
}
return nil
}
func (m *LabelConfigurationStatus) validateDisabled(formats strfmt.Registry) error {
if swag.IsZero(m.Disabled) { // not required
return nil
}
if err := m.Disabled.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("disabled")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("disabled")
}
return err
}
return nil
}
func (m *LabelConfigurationStatus) validateRealized(formats strfmt.Registry) error {
if swag.IsZero(m.Realized) { // not required
return nil
}
if m.Realized != nil {
if err := m.Realized.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
func (m *LabelConfigurationStatus) validateSecurityRelevant(formats strfmt.Registry) error {
if swag.IsZero(m.SecurityRelevant) { // not required
return nil
}
if err := m.SecurityRelevant.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("security-relevant")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("security-relevant")
}
return err
}
return nil
}
// ContextValidate validate this label configuration status based on the context it is used
func (m *LabelConfigurationStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateDerived(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateDisabled(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateRealized(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSecurityRelevant(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *LabelConfigurationStatus) contextValidateDerived(ctx context.Context, formats strfmt.Registry) error {
if err := m.Derived.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("derived")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("derived")
}
return err
}
return nil
}
func (m *LabelConfigurationStatus) contextValidateDisabled(ctx context.Context, formats strfmt.Registry) error {
if err := m.Disabled.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("disabled")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("disabled")
}
return err
}
return nil
}
func (m *LabelConfigurationStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
func (m *LabelConfigurationStatus) contextValidateSecurityRelevant(ctx context.Context, formats strfmt.Registry) error {
if err := m.SecurityRelevant.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("security-relevant")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("security-relevant")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *LabelConfigurationStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *LabelConfigurationStatus) UnmarshalBinary(b []byte) error {
var res LabelConfigurationStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
)
// Labels Set of labels
//
// swagger:model Labels
type Labels []string
// Validate validates this labels
func (m Labels) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this labels based on context it is used
func (m Labels) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
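// Illustrative sketch (not generated): Labels is a plain string slice, so a
// label set can be built directly from label strings; Validate is a no-op for
// this type. The entries are hypothetical.
func exampleLabels() error {
    lbls := Labels{"k8s:app=frontend", "k8s:io.kubernetes.pod.namespace=default"}
    return lbls.Validate(strfmt.Default)
}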
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// MapEvent Event on Map
//
// swagger:model MapEvent
type MapEvent struct {
// Action type for event
// Enum: ["update","delete"]
Action string `json:"action,omitempty"`
// Desired action to be performed after this event
// Enum: ["ok","insert","delete"]
DesiredAction string `json:"desired-action,omitempty"`
// Map key on which the event occurred
Key string `json:"key,omitempty"`
// Last error seen while performing desired action
LastError string `json:"last-error,omitempty"`
// Timestamp when the event occurred
// Format: date-time
Timestamp strfmt.DateTime `json:"timestamp,omitempty"`
// Map value on which the event occurred
Value string `json:"value,omitempty"`
}
// Validate validates this map event
func (m *MapEvent) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateAction(formats); err != nil {
res = append(res, err)
}
if err := m.validateDesiredAction(formats); err != nil {
res = append(res, err)
}
if err := m.validateTimestamp(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var mapEventTypeActionPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["update","delete"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
mapEventTypeActionPropEnum = append(mapEventTypeActionPropEnum, v)
}
}
const (
// MapEventActionUpdate captures enum value "update"
MapEventActionUpdate string = "update"
// MapEventActionDelete captures enum value "delete"
MapEventActionDelete string = "delete"
)
// prop value enum
func (m *MapEvent) validateActionEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, mapEventTypeActionPropEnum, true); err != nil {
return err
}
return nil
}
func (m *MapEvent) validateAction(formats strfmt.Registry) error {
if swag.IsZero(m.Action) { // not required
return nil
}
// value enum
if err := m.validateActionEnum("action", "body", m.Action); err != nil {
return err
}
return nil
}
var mapEventTypeDesiredActionPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["ok","insert","delete"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
mapEventTypeDesiredActionPropEnum = append(mapEventTypeDesiredActionPropEnum, v)
}
}
const (
// MapEventDesiredActionOk captures enum value "ok"
MapEventDesiredActionOk string = "ok"
// MapEventDesiredActionInsert captures enum value "insert"
MapEventDesiredActionInsert string = "insert"
// MapEventDesiredActionDelete captures enum value "delete"
MapEventDesiredActionDelete string = "delete"
)
// prop value enum
func (m *MapEvent) validateDesiredActionEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, mapEventTypeDesiredActionPropEnum, true); err != nil {
return err
}
return nil
}
func (m *MapEvent) validateDesiredAction(formats strfmt.Registry) error {
if swag.IsZero(m.DesiredAction) { // not required
return nil
}
// value enum
if err := m.validateDesiredActionEnum("desired-action", "body", m.DesiredAction); err != nil {
return err
}
return nil
}
func (m *MapEvent) validateTimestamp(formats strfmt.Registry) error {
if swag.IsZero(m.Timestamp) { // not required
return nil
}
if err := validate.FormatOf("timestamp", "body", "date-time", m.Timestamp.String(), formats); err != nil {
return err
}
return nil
}
// ContextValidate validates this map event based on context it is used
func (m *MapEvent) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *MapEvent) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *MapEvent) UnmarshalBinary(b []byte) error {
var res MapEvent
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
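// Illustrative sketch (not generated): constructing a MapEvent from the enum
// constants defined above and validating it. The key/value data is
// hypothetical; the timestamp uses the strfmt zero time so the date-time
// format check passes.
func exampleValidateMapEvent() error {
    ev := &MapEvent{
        Action:        MapEventActionUpdate,
        DesiredAction: MapEventDesiredActionOk,
        Key:           "10.0.0.1:80",
        Value:         "backend-id=42",
        Timestamp:     strfmt.NewDateTime(),
    }
    return ev.Validate(strfmt.Default)
}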
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Masquerading Status of masquerading
//
// +k8s:deepcopy-gen=true
//
// swagger:model Masquerading
type Masquerading struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
// enabled protocols
EnabledProtocols *MasqueradingEnabledProtocols `json:"enabledProtocols,omitempty"`
// Is BPF ip-masq-agent enabled
IPMasqAgent bool `json:"ip-masq-agent,omitempty"`
// mode
// Enum: ["BPF","iptables"]
Mode string `json:"mode,omitempty"`
// This field is obsolete, please use snat-exclusion-cidr-v4 or snat-exclusion-cidr-v6.
SnatExclusionCidr string `json:"snat-exclusion-cidr,omitempty"`
// SnatExclusionCIDRv4 exempts SNAT from being performed on any packet sent to
// an IPv4 address that belongs to this CIDR.
SnatExclusionCidrV4 string `json:"snat-exclusion-cidr-v4,omitempty"`
// SnatExclusionCIDRv6 exempts SNAT from being performed on any packet sent to
// an IPv6 address that belongs to this CIDR.
// For IPv6 we only do masquerading in iptables mode.
SnatExclusionCidrV6 string `json:"snat-exclusion-cidr-v6,omitempty"`
}
// Validate validates this masquerading
func (m *Masquerading) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateEnabledProtocols(formats); err != nil {
res = append(res, err)
}
if err := m.validateMode(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Masquerading) validateEnabledProtocols(formats strfmt.Registry) error {
if swag.IsZero(m.EnabledProtocols) { // not required
return nil
}
if m.EnabledProtocols != nil {
if err := m.EnabledProtocols.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("enabledProtocols")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("enabledProtocols")
}
return err
}
}
return nil
}
var masqueradingTypeModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["BPF","iptables"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
masqueradingTypeModePropEnum = append(masqueradingTypeModePropEnum, v)
}
}
const (
// MasqueradingModeBPF captures enum value "BPF"
MasqueradingModeBPF string = "BPF"
// MasqueradingModeIptables captures enum value "iptables"
MasqueradingModeIptables string = "iptables"
)
// prop value enum
func (m *Masquerading) validateModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, masqueradingTypeModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *Masquerading) validateMode(formats strfmt.Registry) error {
if swag.IsZero(m.Mode) { // not required
return nil
}
// value enum
if err := m.validateModeEnum("mode", "body", m.Mode); err != nil {
return err
}
return nil
}
// ContextValidate validate this masquerading based on the context it is used
func (m *Masquerading) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateEnabledProtocols(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Masquerading) contextValidateEnabledProtocols(ctx context.Context, formats strfmt.Registry) error {
if m.EnabledProtocols != nil {
if swag.IsZero(m.EnabledProtocols) { // not required
return nil
}
if err := m.EnabledProtocols.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("enabledProtocols")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("enabledProtocols")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *Masquerading) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Masquerading) UnmarshalBinary(b []byte) error {
var res Masquerading
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// MasqueradingEnabledProtocols Is masquerading enabled
//
// swagger:model MasqueradingEnabledProtocols
type MasqueradingEnabledProtocols struct {
// Is masquerading enabled for IPv4 traffic
IPV4 bool `json:"ipv4,omitempty"`
// Is masquerading enabled for IPv6 traffic
IPV6 bool `json:"ipv6,omitempty"`
}
// Validate validates this masquerading enabled protocols
func (m *MasqueradingEnabledProtocols) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this masquerading enabled protocols based on context it is used
func (m *MasqueradingEnabledProtocols) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *MasqueradingEnabledProtocols) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *MasqueradingEnabledProtocols) UnmarshalBinary(b []byte) error {
var res MasqueradingEnabledProtocols
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
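// Illustrative sketch (not generated): describing BPF based masquerading for
// IPv4 only. The CIDR is hypothetical; Validate checks the mode against the
// "BPF"/"iptables" enum and descends into enabledProtocols.
func exampleValidateMasquerading() error {
    m := &Masquerading{
        Enabled:             true,
        Mode:                MasqueradingModeBPF,
        EnabledProtocols:    &MasqueradingEnabledProtocols{IPV4: true},
        SnatExclusionCidrV4: "10.0.0.0/8",
    }
    return m.Validate(strfmt.Default)
}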
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// MessageForwardingStatistics Statistics of a message forwarding entity
//
// swagger:model MessageForwardingStatistics
type MessageForwardingStatistics struct {
// Number of messages denied
Denied int64 `json:"denied,omitempty"`
// Number of errors while parsing messages
Error int64 `json:"error,omitempty"`
// Number of messages forwarded
Forwarded int64 `json:"forwarded,omitempty"`
// Number of messages received
Received int64 `json:"received,omitempty"`
}
// Validate validates this message forwarding statistics
func (m *MessageForwardingStatistics) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this message forwarding statistics based on context it is used
func (m *MessageForwardingStatistics) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *MessageForwardingStatistics) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *MessageForwardingStatistics) UnmarshalBinary(b []byte) error {
var res MessageForwardingStatistics
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Metric Metric information
//
// swagger:model Metric
type Metric struct {
// Labels of the metric
Labels map[string]string `json:"labels,omitempty"`
// Name of the metric
Name string `json:"name,omitempty"`
// Value of the metric
Value float64 `json:"value,omitempty"`
}
// Validate validates this metric
func (m *Metric) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this metric based on context it is used
func (m *Metric) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *Metric) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Metric) UnmarshalBinary(b []byte) error {
var res Metric
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
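// Illustrative example (not generated): a minimal sketch of constructing a
// Metric with a labels map and running both validation entry points. Both
// validators are no-ops for this model, so the calls always succeed; the
// metric name and label values below are assumptions for demonstration only.
package models

import (
	"context"

	"github.com/go-openapi/strfmt"
)

// exampleMetric builds a sample metric and checks it with Validate and
// ContextValidate.
func exampleMetric() (*Metric, error) {
	m := &Metric{
		Name:  "cilium_forward_count_total", // assumed metric name
		Value: 42,
		Labels: map[string]string{
			"direction": "INGRESS", // assumed label
		},
	}
	if err := m.Validate(strfmt.Default); err != nil {
		return nil, err
	}
	if err := m.ContextValidate(context.Background(), strfmt.Default); err != nil {
		return nil, err
	}
	return m, nil
}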
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// MonitorStatus Status of the node monitor
//
// swagger:model MonitorStatus
type MonitorStatus struct {
// Number of CPUs to listen on for events.
Cpus int64 `json:"cpus,omitempty"`
// Number of samples lost by perf.
Lost int64 `json:"lost,omitempty"`
// Number of pages used for the perf ring buffer.
Npages int64 `json:"npages,omitempty"`
// Page size used for the perf ring buffer.
Pagesize int64 `json:"pagesize,omitempty"`
// Number of unknown samples.
Unknown int64 `json:"unknown,omitempty"`
}
// Validate validates this monitor status
func (m *MonitorStatus) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this monitor status based on context it is used
func (m *MonitorStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *MonitorStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *MonitorStatus) UnmarshalBinary(b []byte) error {
var res MonitorStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NameManager Internal state about DNS names in relation to policy subsystem
//
// swagger:model NameManager
type NameManager struct {
// Names to poll for DNS Poller
DNSPollNames []string `json:"DNSPollNames"`
// Mapping of FQDNSelectors to corresponding regular expressions
FQDNPolicySelectors []*SelectorEntry `json:"FQDNPolicySelectors"`
}
// Validate validates this name manager
func (m *NameManager) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateFQDNPolicySelectors(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *NameManager) validateFQDNPolicySelectors(formats strfmt.Registry) error {
if swag.IsZero(m.FQDNPolicySelectors) { // not required
return nil
}
for i := 0; i < len(m.FQDNPolicySelectors); i++ {
if swag.IsZero(m.FQDNPolicySelectors[i]) { // not required
continue
}
if m.FQDNPolicySelectors[i] != nil {
if err := m.FQDNPolicySelectors[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("FQDNPolicySelectors" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("FQDNPolicySelectors" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this name manager based on the context it is used
func (m *NameManager) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateFQDNPolicySelectors(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *NameManager) contextValidateFQDNPolicySelectors(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.FQDNPolicySelectors); i++ {
if m.FQDNPolicySelectors[i] != nil {
if swag.IsZero(m.FQDNPolicySelectors[i]) { // not required
return nil
}
if err := m.FQDNPolicySelectors[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("FQDNPolicySelectors" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("FQDNPolicySelectors" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *NameManager) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *NameManager) UnmarshalBinary(b []byte) error {
var res NameManager
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
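// Illustrative example (not generated): a minimal sketch showing that
// NameManager.Validate tolerates nil entries in FQDNPolicySelectors, because
// each element is first checked with swag.IsZero before being validated. The
// DNS name below is an assumption for demonstration only.
package models

import (
	"github.com/go-openapi/strfmt"
)

// exampleNameManagerValidate validates a NameManager whose selector slice
// contains only a nil placeholder; the nil element is skipped, so no error is
// returned.
func exampleNameManagerValidate() error {
	nm := &NameManager{
		DNSPollNames:        []string{"example.com."}, // assumed poll target
		FQDNPolicySelectors: []*SelectorEntry{nil},
	}
	return nm.Validate(strfmt.Default)
}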
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NamedPorts List of named Layer 4 port and protocol pairs which will be used in Network
// Policy specs.
//
// +deepequal-gen=true
// +k8s:deepcopy-gen=true
//
// swagger:model NamedPorts
type NamedPorts []*Port
// Validate validates this named ports
func (m NamedPorts) Validate(formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if swag.IsZero(m[i]) { // not required
continue
}
if m[i] != nil {
if err := m[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validate this named ports based on the context it is used
func (m NamedPorts) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if m[i] != nil {
if swag.IsZero(m[i]) { // not required
return nil
}
if err := m[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
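// Illustrative example (not generated): a minimal sketch showing how the
// NamedPorts slice type surfaces a per-element validation failure. The second
// element uses an unknown protocol string, so Validate returns an error whose
// name is prefixed with that element's index. The port values are assumptions
// for demonstration only.
package models

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

// exampleNamedPortsValidate validates a two-element NamedPorts list and
// reports whether the invalid second entry is rejected.
func exampleNamedPortsValidate() {
	ports := NamedPorts{
		{Name: "http", Port: 80, Protocol: PortProtocolTCP},
		{Name: "bad", Port: 81, Protocol: "NOT-A-PROTOCOL"}, // not a valid enum value
	}
	if err := ports.Validate(strfmt.Default); err != nil {
		fmt.Println("validation failed as expected:", err)
		return
	}
	fmt.Println("unexpectedly valid")
}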
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NodeAddressing Addressing information of a node for all address families
//
// +k8s:deepcopy-gen=true
//
// swagger:model NodeAddressing
type NodeAddressing struct {
// ipv4
IPV4 *NodeAddressingElement `json:"ipv4,omitempty"`
// ipv6
IPV6 *NodeAddressingElement `json:"ipv6,omitempty"`
}
// Validate validates this node addressing
func (m *NodeAddressing) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateIPV4(formats); err != nil {
res = append(res, err)
}
if err := m.validateIPV6(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *NodeAddressing) validateIPV4(formats strfmt.Registry) error {
if swag.IsZero(m.IPV4) { // not required
return nil
}
if m.IPV4 != nil {
if err := m.IPV4.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv4")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv4")
}
return err
}
}
return nil
}
func (m *NodeAddressing) validateIPV6(formats strfmt.Registry) error {
if swag.IsZero(m.IPV6) { // not required
return nil
}
if m.IPV6 != nil {
if err := m.IPV6.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv6")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv6")
}
return err
}
}
return nil
}
// ContextValidate validate this node addressing based on the context it is used
func (m *NodeAddressing) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateIPV4(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIPV6(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *NodeAddressing) contextValidateIPV4(ctx context.Context, formats strfmt.Registry) error {
if m.IPV4 != nil {
if swag.IsZero(m.IPV4) { // not required
return nil
}
if err := m.IPV4.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv4")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv4")
}
return err
}
}
return nil
}
func (m *NodeAddressing) contextValidateIPV6(ctx context.Context, formats strfmt.Registry) error {
if m.IPV6 != nil {
if swag.IsZero(m.IPV6) { // not required
return nil
}
if err := m.IPV6.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv6")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv6")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *NodeAddressing) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *NodeAddressing) UnmarshalBinary(b []byte) error {
var res NodeAddressing
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
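// Illustrative example (not generated): a minimal sketch of building a
// NodeAddressing value from two NodeAddressingElement entries and validating
// the nested structure. The addresses and allocation ranges are assumptions
// for demonstration only.
package models

import (
	"github.com/go-openapi/strfmt"
)

// exampleNodeAddressing assembles dual-stack addressing information for a
// node and runs the nested Validate calls on both address families.
func exampleNodeAddressing() (*NodeAddressing, error) {
	na := &NodeAddressing{
		IPV4: &NodeAddressingElement{
			Enabled:    true,
			IP:         "192.0.2.10", // assumed node IPv4 (TEST-NET-1)
			AllocRange: "10.244.1.0/24",
		},
		IPV6: &NodeAddressingElement{
			Enabled:    true,
			IP:         "2001:db8::10", // assumed node IPv6 (documentation prefix)
			AllocRange: "fd00:10:244:1::/96",
		},
	}
	if err := na.Validate(strfmt.Default); err != nil {
		return nil, err
	}
	return na, nil
}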
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NodeAddressingElement Addressing information
//
// swagger:model NodeAddressingElement
type NodeAddressingElement struct {
// Node address type, one of HostName, ExternalIP or InternalIP
AddressType string `json:"address-type,omitempty"`
// Address pool to be used for local endpoints
AllocRange string `json:"alloc-range,omitempty"`
// True if address family is enabled
Enabled bool `json:"enabled,omitempty"`
// IP address of node
IP string `json:"ip,omitempty"`
}
// Validate validates this node addressing element
func (m *NodeAddressingElement) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this node addressing element based on context it is used
func (m *NodeAddressingElement) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *NodeAddressingElement) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *NodeAddressingElement) UnmarshalBinary(b []byte) error {
var res NodeAddressingElement
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NodeElement Known node in the cluster
//
// +k8s:deepcopy-gen=true
//
// swagger:model NodeElement
type NodeElement struct {
// Address used for probing cluster connectivity
HealthEndpointAddress *NodeAddressing `json:"health-endpoint-address,omitempty"`
// Source address for Ingress listener
IngressAddress *NodeAddressing `json:"ingress-address,omitempty"`
// Name of the node including the cluster association. This is typically
// <clustername>/<hostname>.
//
Name string `json:"name,omitempty"`
// Primary address used for intra-cluster communication
PrimaryAddress *NodeAddressing `json:"primary-address,omitempty"`
// Alternative addresses assigned to the node
SecondaryAddresses []*NodeAddressingElement `json:"secondary-addresses"`
// Source of the node configuration
Source string `json:"source,omitempty"`
}
// Validate validates this node element
func (m *NodeElement) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateHealthEndpointAddress(formats); err != nil {
res = append(res, err)
}
if err := m.validateIngressAddress(formats); err != nil {
res = append(res, err)
}
if err := m.validatePrimaryAddress(formats); err != nil {
res = append(res, err)
}
if err := m.validateSecondaryAddresses(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *NodeElement) validateHealthEndpointAddress(formats strfmt.Registry) error {
if swag.IsZero(m.HealthEndpointAddress) { // not required
return nil
}
if m.HealthEndpointAddress != nil {
if err := m.HealthEndpointAddress.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("health-endpoint-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("health-endpoint-address")
}
return err
}
}
return nil
}
func (m *NodeElement) validateIngressAddress(formats strfmt.Registry) error {
if swag.IsZero(m.IngressAddress) { // not required
return nil
}
if m.IngressAddress != nil {
if err := m.IngressAddress.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ingress-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ingress-address")
}
return err
}
}
return nil
}
func (m *NodeElement) validatePrimaryAddress(formats strfmt.Registry) error {
if swag.IsZero(m.PrimaryAddress) { // not required
return nil
}
if m.PrimaryAddress != nil {
if err := m.PrimaryAddress.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("primary-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("primary-address")
}
return err
}
}
return nil
}
func (m *NodeElement) validateSecondaryAddresses(formats strfmt.Registry) error {
if swag.IsZero(m.SecondaryAddresses) { // not required
return nil
}
for i := 0; i < len(m.SecondaryAddresses); i++ {
if swag.IsZero(m.SecondaryAddresses[i]) { // not required
continue
}
if m.SecondaryAddresses[i] != nil {
if err := m.SecondaryAddresses[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this node element based on the context it is used
func (m *NodeElement) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateHealthEndpointAddress(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIngressAddress(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidatePrimaryAddress(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSecondaryAddresses(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *NodeElement) contextValidateHealthEndpointAddress(ctx context.Context, formats strfmt.Registry) error {
if m.HealthEndpointAddress != nil {
if swag.IsZero(m.HealthEndpointAddress) { // not required
return nil
}
if err := m.HealthEndpointAddress.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("health-endpoint-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("health-endpoint-address")
}
return err
}
}
return nil
}
func (m *NodeElement) contextValidateIngressAddress(ctx context.Context, formats strfmt.Registry) error {
if m.IngressAddress != nil {
if swag.IsZero(m.IngressAddress) { // not required
return nil
}
if err := m.IngressAddress.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ingress-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ingress-address")
}
return err
}
}
return nil
}
func (m *NodeElement) contextValidatePrimaryAddress(ctx context.Context, formats strfmt.Registry) error {
if m.PrimaryAddress != nil {
if swag.IsZero(m.PrimaryAddress) { // not required
return nil
}
if err := m.PrimaryAddress.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("primary-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("primary-address")
}
return err
}
}
return nil
}
func (m *NodeElement) contextValidateSecondaryAddresses(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.SecondaryAddresses); i++ {
if m.SecondaryAddresses[i] != nil {
if swag.IsZero(m.SecondaryAddresses[i]) { // not required
return nil
}
if err := m.SecondaryAddresses[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *NodeElement) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *NodeElement) UnmarshalBinary(b []byte) error {
var res NodeElement
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
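// Illustrative example (not generated): a minimal sketch of describing a
// cluster node with NodeElement, combining a nested NodeAddressing for the
// primary address with a list of secondary NodeAddressingElement entries.
// All names and addresses are assumptions for demonstration only.
package models

import (
	"github.com/go-openapi/strfmt"
)

// exampleNodeElement builds a node description and validates the nested
// primary and secondary address structures in one call.
func exampleNodeElement() (*NodeElement, error) {
	node := &NodeElement{
		Name:   "default/worker-1", // assumed <clustername>/<hostname>
		Source: "k8s",              // assumed configuration source
		PrimaryAddress: &NodeAddressing{
			IPV4: &NodeAddressingElement{Enabled: true, IP: "192.0.2.20"},
		},
		SecondaryAddresses: []*NodeAddressingElement{
			{AddressType: "ExternalIP", IP: "198.51.100.20"}, // assumed secondary address
		},
	}
	if err := node.Validate(strfmt.Default); err != nil {
		return nil, err
	}
	return node, nil
}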
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// NodeID Node ID with associated node IP addresses
//
// swagger:model NodeID
type NodeID struct {
// ID allocated by the agent for the node
// Required: true
ID *int64 `json:"id"`
// IP addresses of the node associated with the ID in the agent
// Required: true
Ips []string `json:"ips"`
}
// Validate validates this node ID
func (m *NodeID) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateID(formats); err != nil {
res = append(res, err)
}
if err := m.validateIps(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *NodeID) validateID(formats strfmt.Registry) error {
if err := validate.Required("id", "body", m.ID); err != nil {
return err
}
return nil
}
func (m *NodeID) validateIps(formats strfmt.Registry) error {
if err := validate.Required("ips", "body", m.Ips); err != nil {
return err
}
return nil
}
// ContextValidate validates this node ID based on context it is used
func (m *NodeID) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *NodeID) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *NodeID) UnmarshalBinary(b []byte) error {
var res NodeID
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
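// Illustrative example (not generated): a minimal sketch showing how the
// required "id" and "ips" properties of NodeID are enforced via
// validate.Required. Leaving both unset makes Validate return a
// CompositeValidationError listing each missing field. The helper name and
// the values in the second, valid instance are assumptions for demonstration
// only.
package models

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
)

// exampleNodeIDRequired validates an empty NodeID (expected to fail) and a
// fully populated one (expected to pass).
func exampleNodeIDRequired() {
	empty := &NodeID{}
	if err := empty.Validate(strfmt.Default); err != nil {
		fmt.Println("missing required fields:", err)
	}

	valid := &NodeID{
		ID:  swag.Int64(7),                            // assumed agent-allocated ID
		Ips: []string{"192.0.2.30", "2001:db8::30"},   // assumed node IPs
	}
	if err := valid.Validate(strfmt.Default); err != nil {
		fmt.Println("unexpected error:", err)
		return
	}
	fmt.Println("valid node ID:", swag.Int64Value(valid.ID))
}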
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Policy Policy definition
//
// swagger:model Policy
type Policy struct {
// Policy definition as JSON.
Policy string `json:"policy,omitempty"`
// Revision number of the policy. Incremented each time the policy is
// changed in the agent's repository
//
Revision int64 `json:"revision,omitempty"`
}
// Validate validates this policy
func (m *Policy) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this policy based on context it is used
func (m *Policy) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *Policy) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Policy) UnmarshalBinary(b []byte) error {
var res Policy
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// PolicyRule A policy rule including the rule labels it derives from
//
// swagger:model PolicyRule
type PolicyRule struct {
// The policy rule labels identifying the policy rules this rule derives from
DerivedFromRules [][]string `json:"derived-from-rules"`
// The policy rule as json
Rule string `json:"rule,omitempty"`
// The policy rule labels identifying the policy rules this rule derives from, mapped by selector
RulesBySelector map[string][][]string `json:"rules-by-selector,omitempty"`
}
// Validate validates this policy rule
func (m *PolicyRule) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this policy rule based on context it is used
func (m *PolicyRule) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *PolicyRule) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *PolicyRule) UnmarshalBinary(b []byte) error {
var res PolicyRule
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
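// Illustrative example (not generated): a minimal sketch of how the
// derived-from-rules metadata of a PolicyRule is shaped: an outer slice with
// one entry per source rule, each entry holding that rule's label set. The
// rule JSON and labels are assumptions for demonstration only.
package models

import (
	"fmt"
)

// examplePolicyRule builds a PolicyRule with one set of originating labels
// and prints how many source rules it derives from.
func examplePolicyRule() {
	pr := &PolicyRule{
		Rule: `{"endpointSelector":{"matchLabels":{"app":"web"}}}`, // assumed rule JSON
		DerivedFromRules: [][]string{
			{
				"k8s:io.cilium.k8s.policy.name=allow-web",      // assumed label
				"k8s:io.cilium.k8s.policy.namespace=default",   // assumed label
			},
		},
	}
	fmt.Printf("rule derives from %d source rule(s)\n", len(pr.DerivedFromRules))
}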
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// PolicyTraceResult Response to a policy resolution process
//
// swagger:model PolicyTraceResult
type PolicyTraceResult struct {
// log
Log string `json:"log,omitempty"`
// verdict
Verdict string `json:"verdict,omitempty"`
}
// Validate validates this policy trace result
func (m *PolicyTraceResult) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this policy trace result based on context it is used
func (m *PolicyTraceResult) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *PolicyTraceResult) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *PolicyTraceResult) UnmarshalBinary(b []byte) error {
var res PolicyTraceResult
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Port Layer 4 port / protocol pair
//
// +deepequal-gen=true
//
// swagger:model Port
type Port struct {
// Optional layer 4 port name
Name string `json:"name,omitempty"`
// Layer 4 port number
Port uint16 `json:"port,omitempty"`
// Layer 4 protocol
// Enum: ["TCP","UDP","SCTP","ICMP","ICMPV6","ANY"]
Protocol string `json:"protocol,omitempty"`
}
// Validate validates this port
func (m *Port) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateProtocol(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var portTypeProtocolPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["TCP","UDP","SCTP","ICMP","ICMPV6","ANY"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
portTypeProtocolPropEnum = append(portTypeProtocolPropEnum, v)
}
}
const (
// PortProtocolTCP captures enum value "TCP"
PortProtocolTCP string = "TCP"
// PortProtocolUDP captures enum value "UDP"
PortProtocolUDP string = "UDP"
// PortProtocolSCTP captures enum value "SCTP"
PortProtocolSCTP string = "SCTP"
// PortProtocolICMP captures enum value "ICMP"
PortProtocolICMP string = "ICMP"
// PortProtocolICMPV6 captures enum value "ICMPV6"
PortProtocolICMPV6 string = "ICMPV6"
// PortProtocolANY captures enum value "ANY"
PortProtocolANY string = "ANY"
)
// prop value enum
func (m *Port) validateProtocolEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, portTypeProtocolPropEnum, true); err != nil {
return err
}
return nil
}
func (m *Port) validateProtocol(formats strfmt.Registry) error {
if swag.IsZero(m.Protocol) { // not required
return nil
}
// value enum
if err := m.validateProtocolEnum("protocol", "body", m.Protocol); err != nil {
return err
}
return nil
}
// ContextValidate validates this port based on context it is used
func (m *Port) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *Port) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Port) UnmarshalBinary(b []byte) error {
var res Port
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
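// Illustrative example (not generated): a minimal sketch contrasting a Port
// that uses one of the generated protocol enum constants with one that uses
// an unknown protocol string, which validateProtocol rejects via
// validate.EnumCase. The port numbers are assumptions for demonstration only.
package models

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

// examplePortProtocolEnum validates one well-formed and one malformed Port.
func examplePortProtocolEnum() {
	ok := &Port{Name: "dns", Port: 53, Protocol: PortProtocolUDP}
	if err := ok.Validate(strfmt.Default); err != nil {
		fmt.Println("unexpected error:", err)
	}

	bad := &Port{Port: 1234, Protocol: "QUIC"} // not in the Enum list
	if err := bad.Validate(strfmt.Default); err != nil {
		fmt.Println("rejected as expected:", err)
	}
}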
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Prefilter Collection of endpoints to be served
//
// swagger:model Prefilter
type Prefilter struct {
// spec
Spec *PrefilterSpec `json:"spec,omitempty"`
// status
Status *PrefilterStatus `json:"status,omitempty"`
}
// Validate validates this prefilter
func (m *Prefilter) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateSpec(formats); err != nil {
res = append(res, err)
}
if err := m.validateStatus(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Prefilter) validateSpec(formats strfmt.Registry) error {
if swag.IsZero(m.Spec) { // not required
return nil
}
if m.Spec != nil {
if err := m.Spec.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *Prefilter) validateStatus(formats strfmt.Registry) error {
if swag.IsZero(m.Status) { // not required
return nil
}
if m.Status != nil {
if err := m.Status.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// ContextValidate validate this prefilter based on the context it is used
func (m *Prefilter) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateSpec(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateStatus(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Prefilter) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error {
if m.Spec != nil {
if swag.IsZero(m.Spec) { // not required
return nil
}
if err := m.Spec.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *Prefilter) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
if m.Status != nil {
if swag.IsZero(m.Status) { // not required
return nil
}
if err := m.Status.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *Prefilter) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Prefilter) UnmarshalBinary(b []byte) error {
var res Prefilter
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// PrefilterSpec CIDR ranges implemented in the Prefilter
//
// swagger:model PrefilterSpec
type PrefilterSpec struct {
// deny
Deny []string `json:"deny"`
// revision
Revision int64 `json:"revision,omitempty"`
}
// Validate validates this prefilter spec
func (m *PrefilterSpec) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this prefilter spec based on context it is used
func (m *PrefilterSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *PrefilterSpec) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *PrefilterSpec) UnmarshalBinary(b []byte) error {
var res PrefilterSpec
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// PrefilterStatus CIDR ranges implemented in the Prefilter
//
// swagger:model PrefilterStatus
type PrefilterStatus struct {
// realized
Realized *PrefilterSpec `json:"realized,omitempty"`
}
// Validate validates this prefilter status
func (m *PrefilterStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *PrefilterStatus) validateRealized(formats strfmt.Registry) error {
if swag.IsZero(m.Realized) { // not required
return nil
}
if m.Realized != nil {
if err := m.Realized.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// ContextValidate validate this prefilter status based on the context it is used
func (m *PrefilterStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateRealized(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *PrefilterStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *PrefilterStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *PrefilterStatus) UnmarshalBinary(b []byte) error {
var res PrefilterStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ProxyRedirect Configured proxy redirection state
//
// swagger:model ProxyRedirect
type ProxyRedirect struct {
// Name of the proxy redirect
Name string `json:"name,omitempty"`
// Name of the proxy this redirect points to
Proxy string `json:"proxy,omitempty"`
// Host port that this redirect points to
ProxyPort int64 `json:"proxy-port,omitempty"`
}
// Validate validates this proxy redirect
func (m *ProxyRedirect) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this proxy redirect based on context it is used
func (m *ProxyRedirect) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *ProxyRedirect) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ProxyRedirect) UnmarshalBinary(b []byte) error {
var res ProxyRedirect
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// ProxyStatistics Statistics of a set of proxy redirects for an endpoint
//
// +k8s:deepcopy-gen=true
//
// swagger:model ProxyStatistics
type ProxyStatistics struct {
// The port the proxy is listening on
AllocatedProxyPort int64 `json:"allocated-proxy-port,omitempty"`
// Location of where the redirect is installed
// Enum: ["ingress","egress"]
Location string `json:"location,omitempty"`
// The port subject to the redirect
Port int64 `json:"port,omitempty"`
// Name of the L7 protocol
Protocol string `json:"protocol,omitempty"`
// Statistics of this set of proxy redirects
Statistics *RequestResponseStatistics `json:"statistics,omitempty"`
}
// Validate validates this proxy statistics
func (m *ProxyStatistics) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateLocation(formats); err != nil {
res = append(res, err)
}
if err := m.validateStatistics(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var proxyStatisticsTypeLocationPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["ingress","egress"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
proxyStatisticsTypeLocationPropEnum = append(proxyStatisticsTypeLocationPropEnum, v)
}
}
const (
// ProxyStatisticsLocationIngress captures enum value "ingress"
ProxyStatisticsLocationIngress string = "ingress"
// ProxyStatisticsLocationEgress captures enum value "egress"
ProxyStatisticsLocationEgress string = "egress"
)
// prop value enum
func (m *ProxyStatistics) validateLocationEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, proxyStatisticsTypeLocationPropEnum, true); err != nil {
return err
}
return nil
}
func (m *ProxyStatistics) validateLocation(formats strfmt.Registry) error {
if swag.IsZero(m.Location) { // not required
return nil
}
// value enum
if err := m.validateLocationEnum("location", "body", m.Location); err != nil {
return err
}
return nil
}
func (m *ProxyStatistics) validateStatistics(formats strfmt.Registry) error {
if swag.IsZero(m.Statistics) { // not required
return nil
}
if m.Statistics != nil {
if err := m.Statistics.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("statistics")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("statistics")
}
return err
}
}
return nil
}
// ContextValidate validate this proxy statistics based on the context it is used
func (m *ProxyStatistics) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateStatistics(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ProxyStatistics) contextValidateStatistics(ctx context.Context, formats strfmt.Registry) error {
if m.Statistics != nil {
if swag.IsZero(m.Statistics) { // not required
return nil
}
if err := m.Statistics.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("statistics")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("statistics")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *ProxyStatistics) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ProxyStatistics) UnmarshalBinary(b []byte) error {
var res ProxyStatistics
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
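// Illustrative example (not generated): a minimal sketch of recording where a
// proxy redirect is installed using the generated location enum constants.
// The nested Statistics field is left nil here, which Validate treats as "not
// required"; the port values are assumptions for demonstration only.
package models

import (
	"github.com/go-openapi/strfmt"
)

// exampleProxyStatistics builds an ingress redirect entry and validates the
// location enum.
func exampleProxyStatistics() (*ProxyStatistics, error) {
	ps := &ProxyStatistics{
		Location:           ProxyStatisticsLocationIngress, // "ingress"
		Port:               80,                             // assumed redirected port
		AllocatedProxyPort: 15001,                          // assumed proxy listener port
		Protocol:           "http",                         // assumed L7 protocol name
	}
	if err := ps.Validate(strfmt.Default); err != nil {
		return nil, err
	}
	return ps, nil
}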
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// ProxyStatus Status of proxy
//
// +k8s:deepcopy-gen=true
//
// swagger:model ProxyStatus
type ProxyStatus struct {
// Deployment mode of Envoy L7 proxy
// Enum: ["embedded","external"]
EnvoyDeploymentMode string `json:"envoy-deployment-mode,omitempty"`
// IP address that the proxy listens on
IP string `json:"ip,omitempty"`
// Port range used for proxying
PortRange string `json:"port-range,omitempty"`
// Detailed description of configured redirects
Redirects []*ProxyRedirect `json:"redirects"`
// Total number of listening proxy ports
TotalPorts int64 `json:"total-ports,omitempty"`
// Total number of ports configured to redirect to proxies
TotalRedirects int64 `json:"total-redirects,omitempty"`
}
// Validate validates this proxy status
func (m *ProxyStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateEnvoyDeploymentMode(formats); err != nil {
res = append(res, err)
}
if err := m.validateRedirects(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var proxyStatusTypeEnvoyDeploymentModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["embedded","external"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
proxyStatusTypeEnvoyDeploymentModePropEnum = append(proxyStatusTypeEnvoyDeploymentModePropEnum, v)
}
}
const (
// ProxyStatusEnvoyDeploymentModeEmbedded captures enum value "embedded"
ProxyStatusEnvoyDeploymentModeEmbedded string = "embedded"
// ProxyStatusEnvoyDeploymentModeExternal captures enum value "external"
ProxyStatusEnvoyDeploymentModeExternal string = "external"
)
// prop value enum
func (m *ProxyStatus) validateEnvoyDeploymentModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, proxyStatusTypeEnvoyDeploymentModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *ProxyStatus) validateEnvoyDeploymentMode(formats strfmt.Registry) error {
if swag.IsZero(m.EnvoyDeploymentMode) { // not required
return nil
}
// value enum
if err := m.validateEnvoyDeploymentModeEnum("envoy-deployment-mode", "body", m.EnvoyDeploymentMode); err != nil {
return err
}
return nil
}
func (m *ProxyStatus) validateRedirects(formats strfmt.Registry) error {
if swag.IsZero(m.Redirects) { // not required
return nil
}
for i := 0; i < len(m.Redirects); i++ {
if swag.IsZero(m.Redirects[i]) { // not required
continue
}
if m.Redirects[i] != nil {
if err := m.Redirects[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("redirects" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("redirects" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this proxy status based on the context it is used
func (m *ProxyStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateRedirects(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ProxyStatus) contextValidateRedirects(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Redirects); i++ {
if m.Redirects[i] != nil {
if swag.IsZero(m.Redirects[i]) { // not required
return nil
}
if err := m.Redirects[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("redirects" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("redirects" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *ProxyStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ProxyStatus) UnmarshalBinary(b []byte) error {
var res ProxyStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Recorder Collection of wildcard filters for pcap recorder
//
// swagger:model Recorder
type Recorder struct {
// spec
Spec *RecorderSpec `json:"spec,omitempty"`
// status
Status *RecorderStatus `json:"status,omitempty"`
}
// Validate validates this recorder
func (m *Recorder) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateSpec(formats); err != nil {
res = append(res, err)
}
if err := m.validateStatus(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Recorder) validateSpec(formats strfmt.Registry) error {
if swag.IsZero(m.Spec) { // not required
return nil
}
if m.Spec != nil {
if err := m.Spec.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *Recorder) validateStatus(formats strfmt.Registry) error {
if swag.IsZero(m.Status) { // not required
return nil
}
if m.Status != nil {
if err := m.Status.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// ContextValidate validate this recorder based on the context it is used
func (m *Recorder) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateSpec(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateStatus(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Recorder) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error {
if m.Spec != nil {
if swag.IsZero(m.Spec) { // not required
return nil
}
if err := m.Spec.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *Recorder) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
if m.Status != nil {
if swag.IsZero(m.Status) { // not required
return nil
}
if err := m.Status.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *Recorder) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Recorder) UnmarshalBinary(b []byte) error {
var res Recorder
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// RecorderFilter n-tuple filter to match traffic to be recorded
//
// swagger:model RecorderFilter
type RecorderFilter struct {
// Layer 4 destination port, zero (or, in the future, a range)
DstPort string `json:"dst-port,omitempty"`
// Layer 3 destination CIDR
DstPrefix string `json:"dst-prefix,omitempty"`
// Layer 4 protocol
// Enum: ["TCP","UDP","SCTP","ANY"]
Protocol string `json:"protocol,omitempty"`
// Layer 4 source port, zero (or, in the future, a range)
SrcPort string `json:"src-port,omitempty"`
// Layer 3 source CIDR
SrcPrefix string `json:"src-prefix,omitempty"`
}
// Validate validates this recorder filter
func (m *RecorderFilter) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateProtocol(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var recorderFilterTypeProtocolPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["TCP","UDP","SCTP","ANY"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
recorderFilterTypeProtocolPropEnum = append(recorderFilterTypeProtocolPropEnum, v)
}
}
const (
// RecorderFilterProtocolTCP captures enum value "TCP"
RecorderFilterProtocolTCP string = "TCP"
// RecorderFilterProtocolUDP captures enum value "UDP"
RecorderFilterProtocolUDP string = "UDP"
// RecorderFilterProtocolSCTP captures enum value "SCTP"
RecorderFilterProtocolSCTP string = "SCTP"
// RecorderFilterProtocolANY captures enum value "ANY"
RecorderFilterProtocolANY string = "ANY"
)
// prop value enum
func (m *RecorderFilter) validateProtocolEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, recorderFilterTypeProtocolPropEnum, true); err != nil {
return err
}
return nil
}
func (m *RecorderFilter) validateProtocol(formats strfmt.Registry) error {
if swag.IsZero(m.Protocol) { // not required
return nil
}
// value enum
if err := m.validateProtocolEnum("protocol", "body", m.Protocol); err != nil {
return err
}
return nil
}
// ContextValidate validates this recorder filter based on context it is used
func (m *RecorderFilter) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *RecorderFilter) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *RecorderFilter) UnmarshalBinary(b []byte) error {
var res RecorderFilter
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// RecorderMask Individual mask for pcap recorder
//
// swagger:model RecorderMask
type RecorderMask struct {
// status
Status *RecorderMaskStatus `json:"status,omitempty"`
}
// Validate validates this recorder mask
func (m *RecorderMask) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateStatus(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RecorderMask) validateStatus(formats strfmt.Registry) error {
if swag.IsZero(m.Status) { // not required
return nil
}
if m.Status != nil {
if err := m.Status.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// ContextValidate validate this recorder mask based on the context it is used
func (m *RecorderMask) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateStatus(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RecorderMask) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
if m.Status != nil {
if swag.IsZero(m.Status) { // not required
return nil
}
if err := m.Status.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *RecorderMask) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *RecorderMask) UnmarshalBinary(b []byte) error {
var res RecorderMask
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// RecorderMaskSpec Configuration of a recorder mask
//
// swagger:model RecorderMaskSpec
type RecorderMaskSpec struct {
// Layer 4 destination port mask
DstPortMask string `json:"dst-port-mask,omitempty"`
// Layer 3 destination IP mask
DstPrefixMask string `json:"dst-prefix-mask,omitempty"`
// Priority of this mask
Priority int64 `json:"priority,omitempty"`
// Layer 4 protocol mask
ProtocolMask string `json:"protocol-mask,omitempty"`
// Layer 4 source port mask
SrcPortMask string `json:"src-port-mask,omitempty"`
// Layer 3 source IP mask
SrcPrefixMask string `json:"src-prefix-mask,omitempty"`
// Number of users of this mask
Users int64 `json:"users,omitempty"`
}
// Validate validates this recorder mask spec
func (m *RecorderMaskSpec) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this recorder mask spec based on context it is used
func (m *RecorderMaskSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *RecorderMaskSpec) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *RecorderMaskSpec) UnmarshalBinary(b []byte) error {
var res RecorderMaskSpec
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// RecorderMaskStatus Configuration of a recorder mask
//
// swagger:model RecorderMaskStatus
type RecorderMaskStatus struct {
// realized
Realized *RecorderMaskSpec `json:"realized,omitempty"`
}
// Validate validates this recorder mask status
func (m *RecorderMaskStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RecorderMaskStatus) validateRealized(formats strfmt.Registry) error {
if swag.IsZero(m.Realized) { // not required
return nil
}
if m.Realized != nil {
if err := m.Realized.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// ContextValidate validate this recorder mask status based on the context it is used
func (m *RecorderMaskStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateRealized(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RecorderMaskStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *RecorderMaskStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *RecorderMaskStatus) UnmarshalBinary(b []byte) error {
var res RecorderMaskStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// RecorderSpec Configuration of a recorder
//
// swagger:model RecorderSpec
type RecorderSpec struct {
// Maximum packet length or zero for full packet length
CaptureLength int64 `json:"capture-length,omitempty"`
// List of wildcard filters for given recorder
// Required: true
Filters []*RecorderFilter `json:"filters"`
// Unique identification
// Required: true
ID *int64 `json:"id"`
}
// Validate validates this recorder spec
func (m *RecorderSpec) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateFilters(formats); err != nil {
res = append(res, err)
}
if err := m.validateID(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RecorderSpec) validateFilters(formats strfmt.Registry) error {
if err := validate.Required("filters", "body", m.Filters); err != nil {
return err
}
for i := 0; i < len(m.Filters); i++ {
if swag.IsZero(m.Filters[i]) { // not required
continue
}
if m.Filters[i] != nil {
if err := m.Filters[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("filters" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("filters" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *RecorderSpec) validateID(formats strfmt.Registry) error {
if err := validate.Required("id", "body", m.ID); err != nil {
return err
}
return nil
}
// ContextValidate validate this recorder spec based on the context it is used
func (m *RecorderSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateFilters(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RecorderSpec) contextValidateFilters(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Filters); i++ {
if m.Filters[i] != nil {
if swag.IsZero(m.Filters[i]) { // not required
return nil
}
if err := m.Filters[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("filters" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("filters" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *RecorderSpec) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *RecorderSpec) UnmarshalBinary(b []byte) error {
var res RecorderSpec
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
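// Illustrative sketch (hand-written, not generated): how RecorderSpec's
// required-field validation behaves. The values below, including the empty
// RecorderFilter slice, are assumptions chosen for demonstration only.
package models

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
)

// exampleRecorderSpecValidation contrasts a RecorderSpec missing its required
// "filters" and "id" fields with one that carries both of them.
func exampleRecorderSpecValidation() {
	// Missing both required fields: Validate returns a composite error with
	// one "required" failure per missing field.
	missing := &RecorderSpec{}
	fmt.Println(missing.Validate(strfmt.Default))

	// Both required fields present; an empty (non-nil) filter list satisfies
	// the required check, and a zero capture length means full packet length.
	ok := &RecorderSpec{
		CaptureLength: 0,
		Filters:       []*RecorderFilter{},
		ID:            swag.Int64(1),
	}
	fmt.Println(ok.Validate(strfmt.Default)) // expected: <nil>
}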
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// RecorderStatus Configuration of a recorder
//
// swagger:model RecorderStatus
type RecorderStatus struct {
// realized
Realized *RecorderSpec `json:"realized,omitempty"`
}
// Validate validates this recorder status
func (m *RecorderStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RecorderStatus) validateRealized(formats strfmt.Registry) error {
if swag.IsZero(m.Realized) { // not required
return nil
}
if m.Realized != nil {
if err := m.Realized.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// ContextValidate validate this recorder status based on the context it is used
func (m *RecorderStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateRealized(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RecorderStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *RecorderStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *RecorderStatus) UnmarshalBinary(b []byte) error {
var res RecorderStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
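// Illustrative sketch (hand-written, not generated): how validation errors
// from the nested RecorderSpec surface through RecorderStatus. The embedded
// spec below is intentionally left missing its required fields.
package models

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

// exampleRecorderStatusNestedValidation shows that errors reported by the
// nested spec are re-rooted under the "realized" field name, so callers can
// tell which sub-object failed.
func exampleRecorderStatusNestedValidation() {
	status := &RecorderStatus{
		// Non-nil but incomplete: "filters" and "id" are required by RecorderSpec.
		Realized: &RecorderSpec{},
	}
	// The returned error names the failing properties as
	// "realized.filters" and "realized.id".
	fmt.Println(status.Validate(strfmt.Default))
}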
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// RemoteCluster Status of remote cluster
//
// +k8s:deepcopy-gen=true
//
// swagger:model RemoteCluster
type RemoteCluster struct {
// Cluster configuration exposed by the remote cluster
Config *RemoteClusterConfig `json:"config,omitempty"`
// Indicates whether the connection to the remote kvstore is established
Connected bool `json:"connected,omitempty"`
// Time of last failure that occurred while attempting to reach the cluster
// Format: date-time
LastFailure strfmt.DateTime `json:"last-failure,omitempty"`
// Name of the cluster
Name string `json:"name,omitempty"`
// Number of endpoints in the cluster
NumEndpoints int64 `json:"num-endpoints,omitempty"`
// Number of failures reaching the cluster
NumFailures int64 `json:"num-failures,omitempty"`
// Number of identities in the cluster
NumIdentities int64 `json:"num-identities,omitempty"`
// Number of nodes in the cluster
NumNodes int64 `json:"num-nodes,omitempty"`
// Number of MCS-API service exports in the cluster
NumServiceExports int64 `json:"num-service-exports,omitempty"`
// Number of services in the cluster
NumSharedServices int64 `json:"num-shared-services,omitempty"`
// Indicates readiness of the remote cluster
Ready bool `json:"ready,omitempty"`
// Status of the control plane
Status string `json:"status,omitempty"`
// Synchronization status about each resource type
Synced *RemoteClusterSynced `json:"synced,omitempty"`
}
// Validate validates this remote cluster
func (m *RemoteCluster) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateConfig(formats); err != nil {
res = append(res, err)
}
if err := m.validateLastFailure(formats); err != nil {
res = append(res, err)
}
if err := m.validateSynced(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RemoteCluster) validateConfig(formats strfmt.Registry) error {
if swag.IsZero(m.Config) { // not required
return nil
}
if m.Config != nil {
if err := m.Config.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("config")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("config")
}
return err
}
}
return nil
}
func (m *RemoteCluster) validateLastFailure(formats strfmt.Registry) error {
if swag.IsZero(m.LastFailure) { // not required
return nil
}
if err := validate.FormatOf("last-failure", "body", "date-time", m.LastFailure.String(), formats); err != nil {
return err
}
return nil
}
func (m *RemoteCluster) validateSynced(formats strfmt.Registry) error {
if swag.IsZero(m.Synced) { // not required
return nil
}
if m.Synced != nil {
if err := m.Synced.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("synced")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("synced")
}
return err
}
}
return nil
}
// ContextValidate validate this remote cluster based on the context it is used
func (m *RemoteCluster) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateConfig(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSynced(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RemoteCluster) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error {
if m.Config != nil {
if swag.IsZero(m.Config) { // not required
return nil
}
if err := m.Config.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("config")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("config")
}
return err
}
}
return nil
}
func (m *RemoteCluster) contextValidateSynced(ctx context.Context, formats strfmt.Registry) error {
if m.Synced != nil {
if swag.IsZero(m.Synced) { // not required
return nil
}
if err := m.Synced.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("synced")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("synced")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *RemoteCluster) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *RemoteCluster) UnmarshalBinary(b []byte) error {
var res RemoteCluster
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
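// Illustrative sketch (hand-written, not generated): populating a
// RemoteCluster entry. The cluster name and counters are invented values.
package models

import (
	"fmt"
	"time"

	"github.com/go-openapi/strfmt"
)

// exampleRemoteClusterStatus builds a RemoteCluster value and validates it;
// the "last-failure" field is checked against the registered date-time
// format, which strfmt.DateTime satisfies by construction.
func exampleRemoteClusterStatus() {
	rc := &RemoteCluster{
		Name:        "cluster-2",
		Connected:   true,
		Ready:       true,
		NumNodes:    3,
		NumFailures: 1,
		LastFailure: strfmt.DateTime(time.Now().Add(-10 * time.Minute)),
	}
	// No nested Config or Synced is set, so only the timestamp format is
	// actually exercised here.
	fmt.Println(rc.Validate(strfmt.Default)) // expected: <nil>
}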
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// RemoteClusterConfig Cluster configuration exposed by the remote cluster
//
// +k8s:deepcopy-gen=true
//
// swagger:model RemoteClusterConfig
type RemoteClusterConfig struct {
// The Cluster ID advertised by the remote cluster
ClusterID int64 `json:"cluster-id,omitempty"`
// Whether the remote cluster information is locally cached by kvstoremesh
Kvstoremesh bool `json:"kvstoremesh,omitempty"`
// Whether the configuration is required to be present
Required bool `json:"required,omitempty"`
// Whether the configuration has been correctly retrieved
Retrieved bool `json:"retrieved,omitempty"`
// Whether or not MCS-API ServiceExports is enabled by the cluster (null means unsupported).
ServiceExportsEnabled *bool `json:"service-exports-enabled,omitempty"`
// Whether the remote cluster supports per-prefix "synced" canaries
SyncCanaries bool `json:"sync-canaries,omitempty"`
}
// Validate validates this remote cluster config
func (m *RemoteClusterConfig) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this remote cluster config based on context it is used
func (m *RemoteClusterConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *RemoteClusterConfig) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *RemoteClusterConfig) UnmarshalBinary(b []byte) error {
var res RemoteClusterConfig
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// RemoteClusterSynced Status of the synchronization with the remote cluster, about each resource
// type. A given resource is considered to be synchronized if the initial
// list of entries has been completely received from the remote cluster, and
// new events are currently being watched.
//
// +k8s:deepcopy-gen=true
//
// swagger:model RemoteClusterSynced
type RemoteClusterSynced struct {
// Endpoints synchronization status
Endpoints bool `json:"endpoints,omitempty"`
// Identities synchronization status
Identities bool `json:"identities,omitempty"`
// Nodes synchronization status
Nodes bool `json:"nodes,omitempty"`
// MCS-API service exports synchronization status (null means that the component is not watching service exports)
ServiceExports *bool `json:"service-exports,omitempty"`
// Services synchronization status
Services bool `json:"services,omitempty"`
}
// Validate validates this remote cluster synced
func (m *RemoteClusterSynced) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this remote cluster synced based on context it is used
func (m *RemoteClusterSynced) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *RemoteClusterSynced) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *RemoteClusterSynced) UnmarshalBinary(b []byte) error {
var res RemoteClusterSynced
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
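// Illustrative sketch (hand-written, not generated): the service-exports
// field of RemoteClusterSynced is a *bool so that "not watching" (nil) can be
// told apart from a plain false; the false/true readings below follow from
// the field comment and are illustrative.
package models

import "github.com/go-openapi/swag"

// exampleRemoteClusterSyncedTriState shows the three states the
// service-exports field can take; swag.Bool simply returns a pointer to its
// argument.
func exampleRemoteClusterSyncedTriState() []*RemoteClusterSynced {
	return []*RemoteClusterSynced{
		// The component is not watching MCS-API service exports at all.
		{Nodes: true, Services: true, ServiceExports: nil},
		// Watching, but service exports are not synchronized yet.
		{Nodes: true, Services: true, ServiceExports: swag.Bool(false)},
		// Everything, including service exports, is synchronized.
		{Nodes: true, Services: true, Identities: true, Endpoints: true, ServiceExports: swag.Bool(true)},
	}
}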
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// RequestResponseStatistics Statistics of a proxy redirect
//
// +k8s:deepcopy-gen=true
//
// swagger:model RequestResponseStatistics
type RequestResponseStatistics struct {
// requests
Requests *MessageForwardingStatistics `json:"requests,omitempty"`
// responses
Responses *MessageForwardingStatistics `json:"responses,omitempty"`
}
// Validate validates this request response statistics
func (m *RequestResponseStatistics) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateRequests(formats); err != nil {
res = append(res, err)
}
if err := m.validateResponses(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RequestResponseStatistics) validateRequests(formats strfmt.Registry) error {
if swag.IsZero(m.Requests) { // not required
return nil
}
if m.Requests != nil {
if err := m.Requests.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("requests")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("requests")
}
return err
}
}
return nil
}
func (m *RequestResponseStatistics) validateResponses(formats strfmt.Registry) error {
if swag.IsZero(m.Responses) { // not required
return nil
}
if m.Responses != nil {
if err := m.Responses.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("responses")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("responses")
}
return err
}
}
return nil
}
// ContextValidate validate this request response statistics based on the context it is used
func (m *RequestResponseStatistics) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateRequests(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateResponses(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *RequestResponseStatistics) contextValidateRequests(ctx context.Context, formats strfmt.Registry) error {
if m.Requests != nil {
if swag.IsZero(m.Requests) { // not required
return nil
}
if err := m.Requests.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("requests")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("requests")
}
return err
}
}
return nil
}
func (m *RequestResponseStatistics) contextValidateResponses(ctx context.Context, formats strfmt.Registry) error {
if m.Responses != nil {
if swag.IsZero(m.Responses) { // not required
return nil
}
if err := m.Responses.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("responses")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("responses")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *RequestResponseStatistics) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *RequestResponseStatistics) UnmarshalBinary(b []byte) error {
var res RequestResponseStatistics
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Routing Status of routing
//
// swagger:model Routing
type Routing struct {
// Datapath routing mode for cross-cluster connectivity
// Enum: ["Native","Tunnel"]
InterHostRoutingMode string `json:"inter-host-routing-mode,omitempty"`
// Datapath routing mode for connectivity within the host
// Enum: ["BPF","Legacy"]
IntraHostRoutingMode string `json:"intra-host-routing-mode,omitempty"`
// Tunnel protocol in use for cross-cluster connectivity
TunnelProtocol string `json:"tunnel-protocol,omitempty"`
}
// Validate validates this routing
func (m *Routing) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateInterHostRoutingMode(formats); err != nil {
res = append(res, err)
}
if err := m.validateIntraHostRoutingMode(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var routingTypeInterHostRoutingModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Native","Tunnel"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
routingTypeInterHostRoutingModePropEnum = append(routingTypeInterHostRoutingModePropEnum, v)
}
}
const (
// RoutingInterHostRoutingModeNative captures enum value "Native"
RoutingInterHostRoutingModeNative string = "Native"
// RoutingInterHostRoutingModeTunnel captures enum value "Tunnel"
RoutingInterHostRoutingModeTunnel string = "Tunnel"
)
// prop value enum
func (m *Routing) validateInterHostRoutingModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, routingTypeInterHostRoutingModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *Routing) validateInterHostRoutingMode(formats strfmt.Registry) error {
if swag.IsZero(m.InterHostRoutingMode) { // not required
return nil
}
// value enum
if err := m.validateInterHostRoutingModeEnum("inter-host-routing-mode", "body", m.InterHostRoutingMode); err != nil {
return err
}
return nil
}
var routingTypeIntraHostRoutingModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["BPF","Legacy"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
routingTypeIntraHostRoutingModePropEnum = append(routingTypeIntraHostRoutingModePropEnum, v)
}
}
const (
// RoutingIntraHostRoutingModeBPF captures enum value "BPF"
RoutingIntraHostRoutingModeBPF string = "BPF"
// RoutingIntraHostRoutingModeLegacy captures enum value "Legacy"
RoutingIntraHostRoutingModeLegacy string = "Legacy"
)
// prop value enum
func (m *Routing) validateIntraHostRoutingModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, routingTypeIntraHostRoutingModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *Routing) validateIntraHostRoutingMode(formats strfmt.Registry) error {
if swag.IsZero(m.IntraHostRoutingMode) { // not required
return nil
}
// value enum
if err := m.validateIntraHostRoutingModeEnum("intra-host-routing-mode", "body", m.IntraHostRoutingMode); err != nil {
return err
}
return nil
}
// ContextValidate validates this routing based on context it is used
func (m *Routing) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *Routing) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Routing) UnmarshalBinary(b []byte) error {
var res Routing
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
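// Illustrative sketch (hand-written, not generated): the routing-mode fields
// are validated case-sensitively against their enums, so the generated
// constants are the safe way to populate them. The tunnel protocol string is
// an invented sample value.
package models

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

// exampleRoutingEnumValidation contrasts a value built from the generated
// enum constants with one using a lowercase spelling that the enum rejects.
func exampleRoutingEnumValidation() {
	good := &Routing{
		InterHostRoutingMode: RoutingInterHostRoutingModeTunnel,
		IntraHostRoutingMode: RoutingIntraHostRoutingModeBPF,
		TunnelProtocol:       "vxlan",
	}
	fmt.Println(good.Validate(strfmt.Default)) // expected: <nil>

	bad := &Routing{InterHostRoutingMode: "tunnel"} // wrong case
	fmt.Println(bad.Validate(strfmt.Default))       // expected: enum violation for inter-host-routing-mode
}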
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// SelectorCache cache of which identities match selectors in the policy repository
//
// swagger:model SelectorCache
type SelectorCache []*SelectorIdentityMapping
// Validate validates this selector cache
func (m SelectorCache) Validate(formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if swag.IsZero(m[i]) { // not required
continue
}
if m[i] != nil {
if err := m[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validate this selector cache based on the context it is used
func (m SelectorCache) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if m[i] != nil {
if swag.IsZero(m[i]) { // not required
return nil
}
if err := m[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
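// Illustrative sketch (hand-written, not generated): SelectorCache is itself
// a slice type, so it is built and validated directly rather than through a
// wrapper struct. The selector string and identity numbers are invented.
package models

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

// exampleSelectorCacheValidation builds a one-entry cache and validates it;
// nil or zero-value entries are skipped by the generated loop.
func exampleSelectorCacheValidation() {
	cache := SelectorCache{
		&SelectorIdentityMapping{
			Selector:   "k8s:app=frontend",
			Identities: []int64{1001, 1002},
			Users:      1,
		},
		nil, // tolerated: nil entries are not validated
	}
	fmt.Println(cache.Validate(strfmt.Default)) // expected: <nil>
}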
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// SelectorEntry Mapping of FQDNSelector to corresponding regular expression
//
// swagger:model SelectorEntry
type SelectorEntry struct {
// String representation of regular expression form of FQDNSelector
RegexString string `json:"regexString,omitempty"`
// FQDNSelector in string representation
SelectorString string `json:"selectorString,omitempty"`
}
// Validate validates this selector entry
func (m *SelectorEntry) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this selector entry based on context it is used
func (m *SelectorEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *SelectorEntry) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *SelectorEntry) UnmarshalBinary(b []byte) error {
var res SelectorEntry
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// SelectorIdentityMapping mapping of selector to identities which match it
//
// swagger:model SelectorIdentityMapping
type SelectorIdentityMapping struct {
// identities mapping to this selector
Identities []int64 `json:"identities"`
// Labels are the metadata labels associated with the selector
Labels LabelArray `json:"labels,omitempty"`
// string form of selector
Selector string `json:"selector,omitempty"`
// number of users of this selector in the cache
Users int64 `json:"users,omitempty"`
}
// Validate validates this selector identity mapping
func (m *SelectorIdentityMapping) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateLabels(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *SelectorIdentityMapping) validateLabels(formats strfmt.Registry) error {
if swag.IsZero(m.Labels) { // not required
return nil
}
if err := m.Labels.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
return nil
}
// ContextValidate validate this selector identity mapping based on the context it is used
func (m *SelectorIdentityMapping) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateLabels(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *SelectorIdentityMapping) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error {
if err := m.Labels.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *SelectorIdentityMapping) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *SelectorIdentityMapping) UnmarshalBinary(b []byte) error {
var res SelectorIdentityMapping
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Service Collection of endpoints to be served
//
// swagger:model Service
type Service struct {
// spec
Spec *ServiceSpec `json:"spec,omitempty"`
// status
Status *ServiceStatus `json:"status,omitempty"`
}
// Validate validates this service
func (m *Service) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateSpec(formats); err != nil {
res = append(res, err)
}
if err := m.validateStatus(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Service) validateSpec(formats strfmt.Registry) error {
if swag.IsZero(m.Spec) { // not required
return nil
}
if m.Spec != nil {
if err := m.Spec.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *Service) validateStatus(formats strfmt.Registry) error {
if swag.IsZero(m.Status) { // not required
return nil
}
if m.Status != nil {
if err := m.Status.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// ContextValidate validate this service based on the context it is used
func (m *Service) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateSpec(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateStatus(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *Service) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error {
if m.Spec != nil {
if swag.IsZero(m.Spec) { // not required
return nil
}
if err := m.Spec.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("spec")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("spec")
}
return err
}
}
return nil
}
func (m *Service) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error {
if m.Status != nil {
if swag.IsZero(m.Status) { // not required
return nil
}
if err := m.Status.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("status")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("status")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *Service) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Service) UnmarshalBinary(b []byte) error {
var res Service
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// ServiceSpec Configuration of a service
//
// swagger:model ServiceSpec
type ServiceSpec struct {
// List of backend addresses
BackendAddresses []*BackendAddress `json:"backend-addresses"`
// flags
Flags *ServiceSpecFlags `json:"flags,omitempty"`
// Frontend address
// Required: true
FrontendAddress *FrontendAddress `json:"frontend-address"`
// Unique identification
ID int64 `json:"id,omitempty"`
// Update all services selecting the backends with their given states
// (id and frontend are ignored)
//
UpdateServices bool `json:"updateServices,omitempty"`
}
// Validate validates this service spec
func (m *ServiceSpec) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateBackendAddresses(formats); err != nil {
res = append(res, err)
}
if err := m.validateFlags(formats); err != nil {
res = append(res, err)
}
if err := m.validateFrontendAddress(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ServiceSpec) validateBackendAddresses(formats strfmt.Registry) error {
if swag.IsZero(m.BackendAddresses) { // not required
return nil
}
for i := 0; i < len(m.BackendAddresses); i++ {
if swag.IsZero(m.BackendAddresses[i]) { // not required
continue
}
if m.BackendAddresses[i] != nil {
if err := m.BackendAddresses[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("backend-addresses" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("backend-addresses" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *ServiceSpec) validateFlags(formats strfmt.Registry) error {
if swag.IsZero(m.Flags) { // not required
return nil
}
if m.Flags != nil {
if err := m.Flags.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("flags")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("flags")
}
return err
}
}
return nil
}
func (m *ServiceSpec) validateFrontendAddress(formats strfmt.Registry) error {
if err := validate.Required("frontend-address", "body", m.FrontendAddress); err != nil {
return err
}
if m.FrontendAddress != nil {
if err := m.FrontendAddress.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("frontend-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("frontend-address")
}
return err
}
}
return nil
}
// ContextValidate validate this service spec based on the context it is used
func (m *ServiceSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateBackendAddresses(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateFlags(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateFrontendAddress(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ServiceSpec) contextValidateBackendAddresses(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.BackendAddresses); i++ {
if m.BackendAddresses[i] != nil {
if swag.IsZero(m.BackendAddresses[i]) { // not required
return nil
}
if err := m.BackendAddresses[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("backend-addresses" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("backend-addresses" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *ServiceSpec) contextValidateFlags(ctx context.Context, formats strfmt.Registry) error {
if m.Flags != nil {
if swag.IsZero(m.Flags) { // not required
return nil
}
if err := m.Flags.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("flags")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("flags")
}
return err
}
}
return nil
}
func (m *ServiceSpec) contextValidateFrontendAddress(ctx context.Context, formats strfmt.Registry) error {
if m.FrontendAddress != nil {
if err := m.FrontendAddress.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("frontend-address")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("frontend-address")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *ServiceSpec) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ServiceSpec) UnmarshalBinary(b []byte) error {
var res ServiceSpec
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
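// Illustrative sketch (hand-written, not generated): the frontend address is
// the only hard requirement of a ServiceSpec, and MarshalBinary and
// UnmarshalBinary round-trip the model through its JSON form. The empty
// FrontendAddress literal is a placeholder, not a meaningful address.
package models

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

// exampleServiceSpecRoundTrip demonstrates the required-field check and a
// marshal/unmarshal round trip (which performs no validation of its own).
func exampleServiceSpecRoundTrip() error {
	// Missing frontend-address: Validate reports a "required" error.
	fmt.Println((&ServiceSpec{ID: 1}).Validate(strfmt.Default))

	spec := &ServiceSpec{
		ID:              1,
		FrontendAddress: &FrontendAddress{}, // placeholder frontend address
		UpdateServices:  true,
	}
	raw, err := spec.MarshalBinary()
	if err != nil {
		return err
	}
	var decoded ServiceSpec
	return decoded.UnmarshalBinary(raw)
}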
// ServiceSpecFlags Optional service configuration flags
//
// swagger:model ServiceSpecFlags
type ServiceSpecFlags struct {
// Service cluster
Cluster string `json:"cluster,omitempty"`
// Service external traffic policy
// Enum: ["Cluster","Local"]
ExtTrafficPolicy string `json:"extTrafficPolicy,omitempty"`
// Service health check node port
HealthCheckNodePort uint16 `json:"healthCheckNodePort,omitempty"`
// Service internal traffic policy
// Enum: ["Cluster","Local"]
IntTrafficPolicy string `json:"intTrafficPolicy,omitempty"`
// Service name (e.g. Kubernetes service name)
Name string `json:"name,omitempty"`
// Service namespace (e.g. Kubernetes namespace)
Namespace string `json:"namespace,omitempty"`
// Service protocol NAT policy
// Enum: ["None","Nat46","Nat64"]
NatPolicy string `json:"natPolicy,omitempty"`
// Service external traffic policy (deprecated in favor of extTrafficPolicy)
// Enum: ["Cluster","Local"]
TrafficPolicy string `json:"trafficPolicy,omitempty"`
// Service type
// Enum: ["ClusterIP","NodePort","ExternalIPs","HostPort","LoadBalancer","LocalRedirect"]
Type string `json:"type,omitempty"`
}
// Validate validates this service spec flags
func (m *ServiceSpecFlags) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateExtTrafficPolicy(formats); err != nil {
res = append(res, err)
}
if err := m.validateIntTrafficPolicy(formats); err != nil {
res = append(res, err)
}
if err := m.validateNatPolicy(formats); err != nil {
res = append(res, err)
}
if err := m.validateTrafficPolicy(formats); err != nil {
res = append(res, err)
}
if err := m.validateType(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var serviceSpecFlagsTypeExtTrafficPolicyPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Cluster","Local"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
serviceSpecFlagsTypeExtTrafficPolicyPropEnum = append(serviceSpecFlagsTypeExtTrafficPolicyPropEnum, v)
}
}
const (
// ServiceSpecFlagsExtTrafficPolicyCluster captures enum value "Cluster"
ServiceSpecFlagsExtTrafficPolicyCluster string = "Cluster"
// ServiceSpecFlagsExtTrafficPolicyLocal captures enum value "Local"
ServiceSpecFlagsExtTrafficPolicyLocal string = "Local"
)
// prop value enum
func (m *ServiceSpecFlags) validateExtTrafficPolicyEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, serviceSpecFlagsTypeExtTrafficPolicyPropEnum, true); err != nil {
return err
}
return nil
}
func (m *ServiceSpecFlags) validateExtTrafficPolicy(formats strfmt.Registry) error {
if swag.IsZero(m.ExtTrafficPolicy) { // not required
return nil
}
// value enum
if err := m.validateExtTrafficPolicyEnum("flags"+"."+"extTrafficPolicy", "body", m.ExtTrafficPolicy); err != nil {
return err
}
return nil
}
var serviceSpecFlagsTypeIntTrafficPolicyPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Cluster","Local"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
serviceSpecFlagsTypeIntTrafficPolicyPropEnum = append(serviceSpecFlagsTypeIntTrafficPolicyPropEnum, v)
}
}
const (
// ServiceSpecFlagsIntTrafficPolicyCluster captures enum value "Cluster"
ServiceSpecFlagsIntTrafficPolicyCluster string = "Cluster"
// ServiceSpecFlagsIntTrafficPolicyLocal captures enum value "Local"
ServiceSpecFlagsIntTrafficPolicyLocal string = "Local"
)
// prop value enum
func (m *ServiceSpecFlags) validateIntTrafficPolicyEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, serviceSpecFlagsTypeIntTrafficPolicyPropEnum, true); err != nil {
return err
}
return nil
}
func (m *ServiceSpecFlags) validateIntTrafficPolicy(formats strfmt.Registry) error {
if swag.IsZero(m.IntTrafficPolicy) { // not required
return nil
}
// value enum
if err := m.validateIntTrafficPolicyEnum("flags"+"."+"intTrafficPolicy", "body", m.IntTrafficPolicy); err != nil {
return err
}
return nil
}
var serviceSpecFlagsTypeNatPolicyPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["None","Nat46","Nat64"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
serviceSpecFlagsTypeNatPolicyPropEnum = append(serviceSpecFlagsTypeNatPolicyPropEnum, v)
}
}
const (
// ServiceSpecFlagsNatPolicyNone captures enum value "None"
ServiceSpecFlagsNatPolicyNone string = "None"
// ServiceSpecFlagsNatPolicyNat46 captures enum value "Nat46"
ServiceSpecFlagsNatPolicyNat46 string = "Nat46"
// ServiceSpecFlagsNatPolicyNat64 captures enum value "Nat64"
ServiceSpecFlagsNatPolicyNat64 string = "Nat64"
)
// prop value enum
func (m *ServiceSpecFlags) validateNatPolicyEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, serviceSpecFlagsTypeNatPolicyPropEnum, true); err != nil {
return err
}
return nil
}
func (m *ServiceSpecFlags) validateNatPolicy(formats strfmt.Registry) error {
if swag.IsZero(m.NatPolicy) { // not required
return nil
}
// value enum
if err := m.validateNatPolicyEnum("flags"+"."+"natPolicy", "body", m.NatPolicy); err != nil {
return err
}
return nil
}
var serviceSpecFlagsTypeTrafficPolicyPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Cluster","Local"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
serviceSpecFlagsTypeTrafficPolicyPropEnum = append(serviceSpecFlagsTypeTrafficPolicyPropEnum, v)
}
}
const (
// ServiceSpecFlagsTrafficPolicyCluster captures enum value "Cluster"
ServiceSpecFlagsTrafficPolicyCluster string = "Cluster"
// ServiceSpecFlagsTrafficPolicyLocal captures enum value "Local"
ServiceSpecFlagsTrafficPolicyLocal string = "Local"
)
// prop value enum
func (m *ServiceSpecFlags) validateTrafficPolicyEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, serviceSpecFlagsTypeTrafficPolicyPropEnum, true); err != nil {
return err
}
return nil
}
func (m *ServiceSpecFlags) validateTrafficPolicy(formats strfmt.Registry) error {
if swag.IsZero(m.TrafficPolicy) { // not required
return nil
}
// value enum
if err := m.validateTrafficPolicyEnum("flags"+"."+"trafficPolicy", "body", m.TrafficPolicy); err != nil {
return err
}
return nil
}
var serviceSpecFlagsTypeTypePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["ClusterIP","NodePort","ExternalIPs","HostPort","LoadBalancer","LocalRedirect"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
serviceSpecFlagsTypeTypePropEnum = append(serviceSpecFlagsTypeTypePropEnum, v)
}
}
const (
// ServiceSpecFlagsTypeClusterIP captures enum value "ClusterIP"
ServiceSpecFlagsTypeClusterIP string = "ClusterIP"
// ServiceSpecFlagsTypeNodePort captures enum value "NodePort"
ServiceSpecFlagsTypeNodePort string = "NodePort"
// ServiceSpecFlagsTypeExternalIPs captures enum value "ExternalIPs"
ServiceSpecFlagsTypeExternalIPs string = "ExternalIPs"
// ServiceSpecFlagsTypeHostPort captures enum value "HostPort"
ServiceSpecFlagsTypeHostPort string = "HostPort"
// ServiceSpecFlagsTypeLoadBalancer captures enum value "LoadBalancer"
ServiceSpecFlagsTypeLoadBalancer string = "LoadBalancer"
// ServiceSpecFlagsTypeLocalRedirect captures enum value "LocalRedirect"
ServiceSpecFlagsTypeLocalRedirect string = "LocalRedirect"
)
// prop value enum
func (m *ServiceSpecFlags) validateTypeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, serviceSpecFlagsTypeTypePropEnum, true); err != nil {
return err
}
return nil
}
func (m *ServiceSpecFlags) validateType(formats strfmt.Registry) error {
if swag.IsZero(m.Type) { // not required
return nil
}
// value enum
if err := m.validateTypeEnum("flags"+"."+"type", "body", m.Type); err != nil {
return err
}
return nil
}
// ContextValidate validates this service spec flags based on context it is used
func (m *ServiceSpecFlags) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *ServiceSpecFlags) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ServiceSpecFlags) UnmarshalBinary(b []byte) error {
var res ServiceSpecFlags
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
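// Illustrative sketch (hand-written, not generated): the exported
// ServiceSpecFlags* constants cover every accepted enum value, so using them
// avoids case or spelling mismatches at validation time. The service name and
// namespace are invented.
package models

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

// exampleServiceSpecFlags builds flags for a hypothetical NodePort service
// with a local external traffic policy and validates them.
func exampleServiceSpecFlags() {
	flags := &ServiceSpecFlags{
		Name:             "frontend",
		Namespace:        "default",
		Type:             ServiceSpecFlagsTypeNodePort,
		ExtTrafficPolicy: ServiceSpecFlagsExtTrafficPolicyLocal,
		IntTrafficPolicy: ServiceSpecFlagsIntTrafficPolicyCluster,
		NatPolicy:        ServiceSpecFlagsNatPolicyNone,
	}
	fmt.Println(flags.Validate(strfmt.Default)) // expected: <nil>
}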
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ServiceStatus Configuration of a service
//
// swagger:model ServiceStatus
type ServiceStatus struct {
// realized
Realized *ServiceSpec `json:"realized,omitempty"`
}
// Validate validates this service status
func (m *ServiceStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateRealized(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ServiceStatus) validateRealized(formats strfmt.Registry) error {
if swag.IsZero(m.Realized) { // not required
return nil
}
if m.Realized != nil {
if err := m.Realized.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// ContextValidate validate this service status based on the context it is used
func (m *ServiceStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateRealized(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ServiceStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error {
if m.Realized != nil {
if swag.IsZero(m.Realized) { // not required
return nil
}
if err := m.Realized.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("realized")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("realized")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *ServiceStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ServiceStatus) UnmarshalBinary(b []byte) error {
var res ServiceStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Srv6 Status of the SRv6
//
// swagger:model Srv6
type Srv6 struct {
// enabled
Enabled bool `json:"enabled,omitempty"`
// srv6 encap mode
// Enum: ["SRH","Reduced"]
Srv6EncapMode string `json:"srv6EncapMode,omitempty"`
}
// Validate validates this srv6
func (m *Srv6) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateSrv6EncapMode(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var srv6TypeSrv6EncapModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["SRH","Reduced"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
srv6TypeSrv6EncapModePropEnum = append(srv6TypeSrv6EncapModePropEnum, v)
}
}
const (
// Srv6Srv6EncapModeSRH captures enum value "SRH"
Srv6Srv6EncapModeSRH string = "SRH"
// Srv6Srv6EncapModeReduced captures enum value "Reduced"
Srv6Srv6EncapModeReduced string = "Reduced"
)
// prop value enum
func (m *Srv6) validateSrv6EncapModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, srv6TypeSrv6EncapModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *Srv6) validateSrv6EncapMode(formats strfmt.Registry) error {
if swag.IsZero(m.Srv6EncapMode) { // not required
return nil
}
// value enum
if err := m.validateSrv6EncapModeEnum("srv6EncapMode", "body", m.Srv6EncapMode); err != nil {
return err
}
return nil
}
// ContextValidate validates this srv6 based on context it is used
func (m *Srv6) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *Srv6) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Srv6) UnmarshalBinary(b []byte) error {
var res Srv6
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// StateDBQuery StateDB query
//
// swagger:model StateDBQuery
type StateDBQuery struct {
// Index to query against
Index string `json:"index,omitempty"`
// Key to query with. Base64 encoded.
Key string `json:"key,omitempty"`
// LowerBound prefix search or full-matching Get
Lowerbound bool `json:"lowerbound,omitempty"`
// Name of the table to query
Table string `json:"table,omitempty"`
}
// Validate validates this StateDB query
func (m *StateDBQuery) Validate(formats strfmt.Registry) error {
return nil
}
// ContextValidate validates this StateDB query based on context it is used
func (m *StateDBQuery) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *StateDBQuery) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *StateDBQuery) UnmarshalBinary(b []byte) error {
var res StateDBQuery
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
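// Illustrative sketch (hand-written, not generated): the query key is carried
// base64-encoded, so callers encode the raw index key before filling in the
// model. The helper name and the table/index parameters are assumptions for
// demonstration only.
package models

import "encoding/base64"

// newStateDBQuery is a hypothetical helper that wraps the base64 encoding of
// the raw key; lowerbound selects a prefix search instead of an exact Get.
func newStateDBQuery(table, index string, rawKey []byte, lowerbound bool) *StateDBQuery {
	return &StateDBQuery{
		Table:      table,
		Index:      index,
		Key:        base64.StdEncoding.EncodeToString(rawKey),
		Lowerbound: lowerbound,
	}
}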
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// Status Status of an individual component
//
// swagger:model Status
type Status struct {
// Human readable status/error/warning message
Msg string `json:"msg,omitempty"`
// State the component is in
// Enum: ["Ok","Warning","Failure","Disabled"]
State string `json:"state,omitempty"`
}
// Validate validates this status
func (m *Status) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var statusTypeStatePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Ok","Warning","Failure","Disabled"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
statusTypeStatePropEnum = append(statusTypeStatePropEnum, v)
}
}
const (
// StatusStateOk captures enum value "Ok"
StatusStateOk string = "Ok"
// StatusStateWarning captures enum value "Warning"
StatusStateWarning string = "Warning"
// StatusStateFailure captures enum value "Failure"
StatusStateFailure string = "Failure"
// StatusStateDisabled captures enum value "Disabled"
StatusStateDisabled string = "Disabled"
)
// prop value enum
func (m *Status) validateStateEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, statusTypeStatePropEnum, true); err != nil {
return err
}
return nil
}
func (m *Status) validateState(formats strfmt.Registry) error {
if swag.IsZero(m.State) { // not required
return nil
}
// value enum
if err := m.validateStateEnum("state", "body", m.State); err != nil {
return err
}
return nil
}
// ContextValidate validates this status based on context it is used
func (m *Status) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *Status) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *Status) UnmarshalBinary(b []byte) error {
var res Status
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
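// Illustrative sketch (hand-written, not generated): Status is the common
// building block reused by the larger status models such as StatusResponse;
// the message text below is invented.
package models

// exampleComponentStatus returns a warning-state Status built from the
// generated state constants.
func exampleComponentStatus() *Status {
	return &Status{
		State: StatusStateWarning,
		Msg:   "kvstore: reconnecting to etcd endpoint",
	}
}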
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// StatusResponse Health and status information of daemon
//
// +k8s:deepcopy-gen=true
//
// swagger:model StatusResponse
type StatusResponse struct {
// Status of core datapath attachment mode
AttachMode AttachMode `json:"attach-mode,omitempty"`
// Status of Mutual Authentication certificate provider
AuthCertificateProvider *Status `json:"auth-certificate-provider,omitempty"`
// Status of bandwidth manager
BandwidthManager *BandwidthManager `json:"bandwidth-manager,omitempty"`
// Status of BPF maps
BpfMaps *BPFMapStatus `json:"bpf-maps,omitempty"`
// Status of Cilium daemon
Cilium *Status `json:"cilium,omitempty"`
// When supported by the API, this client ID should be used by the
// client when making another request to the server.
// See for example "/cluster/nodes".
//
ClientID int64 `json:"client-id,omitempty"`
// Status of clock source
ClockSource *ClockSource `json:"clock-source,omitempty"`
// Status of cluster
Cluster *ClusterStatus `json:"cluster,omitempty"`
// Status of ClusterMesh
ClusterMesh *ClusterMeshStatus `json:"cluster-mesh,omitempty"`
// Status of CNI chaining
CniChaining *CNIChainingStatus `json:"cni-chaining,omitempty"`
// Status of the CNI configuration file
CniFile *Status `json:"cni-file,omitempty"`
// Status of local container runtime
ContainerRuntime *Status `json:"container-runtime,omitempty"`
// Status of all endpoint controllers
Controllers ControllerStatuses `json:"controllers,omitempty"`
// Status of datapath mode
DatapathMode DatapathMode `json:"datapath-mode,omitempty"`
// Status of transparent encryption
Encryption *EncryptionStatus `json:"encryption,omitempty"`
// Status of the host firewall
HostFirewall *HostFirewall `json:"host-firewall,omitempty"`
// Status of Hubble server
Hubble *HubbleStatus `json:"hubble,omitempty"`
// Status of identity range of the cluster
IdentityRange *IdentityRange `json:"identity-range,omitempty"`
// Status of IP address management
Ipam *IPAMStatus `json:"ipam,omitempty"`
// Status of IPv4 BIG TCP
IPV4BigTCP *IPV4BigTCP `json:"ipv4-big-tcp,omitempty"`
// Status of IPv6 BIG TCP
IPV6BigTCP *IPV6BigTCP `json:"ipv6-big-tcp,omitempty"`
// Status of kube-proxy replacement
KubeProxyReplacement *KubeProxyReplacement `json:"kube-proxy-replacement,omitempty"`
// Status of Kubernetes integration
Kubernetes *K8sStatus `json:"kubernetes,omitempty"`
// Status of key/value datastore
Kvstore *Status `json:"kvstore,omitempty"`
// Status of masquerading
Masquerading *Masquerading `json:"masquerading,omitempty"`
// Status of the node monitor
NodeMonitor *MonitorStatus `json:"nodeMonitor,omitempty"`
// Status of proxy
Proxy *ProxyStatus `json:"proxy,omitempty"`
// Status of routing
Routing *Routing `json:"routing,omitempty"`
// Status of SRv6
Srv6 *Srv6 `json:"srv6,omitempty"`
// List of stale information in the status
Stale map[string]strfmt.DateTime `json:"stale,omitempty"`
}
// Validate validates this status response
func (m *StatusResponse) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateAttachMode(formats); err != nil {
res = append(res, err)
}
if err := m.validateAuthCertificateProvider(formats); err != nil {
res = append(res, err)
}
if err := m.validateBandwidthManager(formats); err != nil {
res = append(res, err)
}
if err := m.validateBpfMaps(formats); err != nil {
res = append(res, err)
}
if err := m.validateCilium(formats); err != nil {
res = append(res, err)
}
if err := m.validateClockSource(formats); err != nil {
res = append(res, err)
}
if err := m.validateCluster(formats); err != nil {
res = append(res, err)
}
if err := m.validateClusterMesh(formats); err != nil {
res = append(res, err)
}
if err := m.validateCniChaining(formats); err != nil {
res = append(res, err)
}
if err := m.validateCniFile(formats); err != nil {
res = append(res, err)
}
if err := m.validateContainerRuntime(formats); err != nil {
res = append(res, err)
}
if err := m.validateControllers(formats); err != nil {
res = append(res, err)
}
if err := m.validateDatapathMode(formats); err != nil {
res = append(res, err)
}
if err := m.validateEncryption(formats); err != nil {
res = append(res, err)
}
if err := m.validateHostFirewall(formats); err != nil {
res = append(res, err)
}
if err := m.validateHubble(formats); err != nil {
res = append(res, err)
}
if err := m.validateIdentityRange(formats); err != nil {
res = append(res, err)
}
if err := m.validateIpam(formats); err != nil {
res = append(res, err)
}
if err := m.validateIPV4BigTCP(formats); err != nil {
res = append(res, err)
}
if err := m.validateIPV6BigTCP(formats); err != nil {
res = append(res, err)
}
if err := m.validateKubeProxyReplacement(formats); err != nil {
res = append(res, err)
}
if err := m.validateKubernetes(formats); err != nil {
res = append(res, err)
}
if err := m.validateKvstore(formats); err != nil {
res = append(res, err)
}
if err := m.validateMasquerading(formats); err != nil {
res = append(res, err)
}
if err := m.validateNodeMonitor(formats); err != nil {
res = append(res, err)
}
if err := m.validateProxy(formats); err != nil {
res = append(res, err)
}
if err := m.validateRouting(formats); err != nil {
res = append(res, err)
}
if err := m.validateSrv6(formats); err != nil {
res = append(res, err)
}
if err := m.validateStale(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *StatusResponse) validateAttachMode(formats strfmt.Registry) error {
if swag.IsZero(m.AttachMode) { // not required
return nil
}
if err := m.AttachMode.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("attach-mode")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("attach-mode")
}
return err
}
return nil
}
func (m *StatusResponse) validateAuthCertificateProvider(formats strfmt.Registry) error {
if swag.IsZero(m.AuthCertificateProvider) { // not required
return nil
}
if m.AuthCertificateProvider != nil {
if err := m.AuthCertificateProvider.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("auth-certificate-provider")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("auth-certificate-provider")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateBandwidthManager(formats strfmt.Registry) error {
if swag.IsZero(m.BandwidthManager) { // not required
return nil
}
if m.BandwidthManager != nil {
if err := m.BandwidthManager.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("bandwidth-manager")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("bandwidth-manager")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateBpfMaps(formats strfmt.Registry) error {
if swag.IsZero(m.BpfMaps) { // not required
return nil
}
if m.BpfMaps != nil {
if err := m.BpfMaps.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("bpf-maps")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("bpf-maps")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateCilium(formats strfmt.Registry) error {
if swag.IsZero(m.Cilium) { // not required
return nil
}
if m.Cilium != nil {
if err := m.Cilium.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cilium")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cilium")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateClockSource(formats strfmt.Registry) error {
if swag.IsZero(m.ClockSource) { // not required
return nil
}
if m.ClockSource != nil {
if err := m.ClockSource.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("clock-source")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("clock-source")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateCluster(formats strfmt.Registry) error {
if swag.IsZero(m.Cluster) { // not required
return nil
}
if m.Cluster != nil {
if err := m.Cluster.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cluster")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cluster")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateClusterMesh(formats strfmt.Registry) error {
if swag.IsZero(m.ClusterMesh) { // not required
return nil
}
if m.ClusterMesh != nil {
if err := m.ClusterMesh.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cluster-mesh")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cluster-mesh")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateCniChaining(formats strfmt.Registry) error {
if swag.IsZero(m.CniChaining) { // not required
return nil
}
if m.CniChaining != nil {
if err := m.CniChaining.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cni-chaining")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cni-chaining")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateCniFile(formats strfmt.Registry) error {
if swag.IsZero(m.CniFile) { // not required
return nil
}
if m.CniFile != nil {
if err := m.CniFile.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cni-file")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cni-file")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateContainerRuntime(formats strfmt.Registry) error {
if swag.IsZero(m.ContainerRuntime) { // not required
return nil
}
if m.ContainerRuntime != nil {
if err := m.ContainerRuntime.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("container-runtime")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("container-runtime")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateControllers(formats strfmt.Registry) error {
if swag.IsZero(m.Controllers) { // not required
return nil
}
if err := m.Controllers.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("controllers")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("controllers")
}
return err
}
return nil
}
func (m *StatusResponse) validateDatapathMode(formats strfmt.Registry) error {
if swag.IsZero(m.DatapathMode) { // not required
return nil
}
if err := m.DatapathMode.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("datapath-mode")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("datapath-mode")
}
return err
}
return nil
}
func (m *StatusResponse) validateEncryption(formats strfmt.Registry) error {
if swag.IsZero(m.Encryption) { // not required
return nil
}
if m.Encryption != nil {
if err := m.Encryption.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("encryption")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("encryption")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateHostFirewall(formats strfmt.Registry) error {
if swag.IsZero(m.HostFirewall) { // not required
return nil
}
if m.HostFirewall != nil {
if err := m.HostFirewall.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("host-firewall")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("host-firewall")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateHubble(formats strfmt.Registry) error {
if swag.IsZero(m.Hubble) { // not required
return nil
}
if m.Hubble != nil {
if err := m.Hubble.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("hubble")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("hubble")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateIdentityRange(formats strfmt.Registry) error {
if swag.IsZero(m.IdentityRange) { // not required
return nil
}
if m.IdentityRange != nil {
if err := m.IdentityRange.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("identity-range")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("identity-range")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateIpam(formats strfmt.Registry) error {
if swag.IsZero(m.Ipam) { // not required
return nil
}
if m.Ipam != nil {
if err := m.Ipam.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipam")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipam")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateIPV4BigTCP(formats strfmt.Registry) error {
if swag.IsZero(m.IPV4BigTCP) { // not required
return nil
}
if m.IPV4BigTCP != nil {
if err := m.IPV4BigTCP.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv4-big-tcp")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv4-big-tcp")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateIPV6BigTCP(formats strfmt.Registry) error {
if swag.IsZero(m.IPV6BigTCP) { // not required
return nil
}
if m.IPV6BigTCP != nil {
if err := m.IPV6BigTCP.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv6-big-tcp")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv6-big-tcp")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateKubeProxyReplacement(formats strfmt.Registry) error {
if swag.IsZero(m.KubeProxyReplacement) { // not required
return nil
}
if m.KubeProxyReplacement != nil {
if err := m.KubeProxyReplacement.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("kube-proxy-replacement")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("kube-proxy-replacement")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateKubernetes(formats strfmt.Registry) error {
if swag.IsZero(m.Kubernetes) { // not required
return nil
}
if m.Kubernetes != nil {
if err := m.Kubernetes.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("kubernetes")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("kubernetes")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateKvstore(formats strfmt.Registry) error {
if swag.IsZero(m.Kvstore) { // not required
return nil
}
if m.Kvstore != nil {
if err := m.Kvstore.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("kvstore")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("kvstore")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateMasquerading(formats strfmt.Registry) error {
if swag.IsZero(m.Masquerading) { // not required
return nil
}
if m.Masquerading != nil {
if err := m.Masquerading.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("masquerading")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("masquerading")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateNodeMonitor(formats strfmt.Registry) error {
if swag.IsZero(m.NodeMonitor) { // not required
return nil
}
if m.NodeMonitor != nil {
if err := m.NodeMonitor.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodeMonitor")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodeMonitor")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateProxy(formats strfmt.Registry) error {
if swag.IsZero(m.Proxy) { // not required
return nil
}
if m.Proxy != nil {
if err := m.Proxy.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("proxy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("proxy")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateRouting(formats strfmt.Registry) error {
if swag.IsZero(m.Routing) { // not required
return nil
}
if m.Routing != nil {
if err := m.Routing.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("routing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("routing")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateSrv6(formats strfmt.Registry) error {
if swag.IsZero(m.Srv6) { // not required
return nil
}
if m.Srv6 != nil {
if err := m.Srv6.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("srv6")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("srv6")
}
return err
}
}
return nil
}
func (m *StatusResponse) validateStale(formats strfmt.Registry) error {
if swag.IsZero(m.Stale) { // not required
return nil
}
for k := range m.Stale {
if err := validate.FormatOf("stale"+"."+k, "body", "date-time", m.Stale[k].String(), formats); err != nil {
return err
}
}
return nil
}
// ContextValidate validate this status response based on the context it is used
func (m *StatusResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateAttachMode(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateAuthCertificateProvider(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateBandwidthManager(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateBpfMaps(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateCilium(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateClockSource(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateCluster(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateClusterMesh(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateCniChaining(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateCniFile(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateContainerRuntime(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateControllers(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateDatapathMode(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateEncryption(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateHostFirewall(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateHubble(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIdentityRange(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIpam(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIPV4BigTCP(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateIPV6BigTCP(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateKubeProxyReplacement(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateKubernetes(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateKvstore(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateMasquerading(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateNodeMonitor(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateProxy(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateRouting(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateSrv6(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *StatusResponse) contextValidateAttachMode(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.AttachMode) { // not required
return nil
}
if err := m.AttachMode.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("attach-mode")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("attach-mode")
}
return err
}
return nil
}
func (m *StatusResponse) contextValidateAuthCertificateProvider(ctx context.Context, formats strfmt.Registry) error {
if m.AuthCertificateProvider != nil {
if swag.IsZero(m.AuthCertificateProvider) { // not required
return nil
}
if err := m.AuthCertificateProvider.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("auth-certificate-provider")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("auth-certificate-provider")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateBandwidthManager(ctx context.Context, formats strfmt.Registry) error {
if m.BandwidthManager != nil {
if swag.IsZero(m.BandwidthManager) { // not required
return nil
}
if err := m.BandwidthManager.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("bandwidth-manager")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("bandwidth-manager")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateBpfMaps(ctx context.Context, formats strfmt.Registry) error {
if m.BpfMaps != nil {
if swag.IsZero(m.BpfMaps) { // not required
return nil
}
if err := m.BpfMaps.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("bpf-maps")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("bpf-maps")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateCilium(ctx context.Context, formats strfmt.Registry) error {
if m.Cilium != nil {
if swag.IsZero(m.Cilium) { // not required
return nil
}
if err := m.Cilium.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cilium")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cilium")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateClockSource(ctx context.Context, formats strfmt.Registry) error {
if m.ClockSource != nil {
if swag.IsZero(m.ClockSource) { // not required
return nil
}
if err := m.ClockSource.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("clock-source")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("clock-source")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateCluster(ctx context.Context, formats strfmt.Registry) error {
if m.Cluster != nil {
if swag.IsZero(m.Cluster) { // not required
return nil
}
if err := m.Cluster.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cluster")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cluster")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateClusterMesh(ctx context.Context, formats strfmt.Registry) error {
if m.ClusterMesh != nil {
if swag.IsZero(m.ClusterMesh) { // not required
return nil
}
if err := m.ClusterMesh.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cluster-mesh")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cluster-mesh")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateCniChaining(ctx context.Context, formats strfmt.Registry) error {
if m.CniChaining != nil {
if swag.IsZero(m.CniChaining) { // not required
return nil
}
if err := m.CniChaining.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cni-chaining")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cni-chaining")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateCniFile(ctx context.Context, formats strfmt.Registry) error {
if m.CniFile != nil {
if swag.IsZero(m.CniFile) { // not required
return nil
}
if err := m.CniFile.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("cni-file")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("cni-file")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateContainerRuntime(ctx context.Context, formats strfmt.Registry) error {
if m.ContainerRuntime != nil {
if swag.IsZero(m.ContainerRuntime) { // not required
return nil
}
if err := m.ContainerRuntime.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("container-runtime")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("container-runtime")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateControllers(ctx context.Context, formats strfmt.Registry) error {
if err := m.Controllers.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("controllers")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("controllers")
}
return err
}
return nil
}
func (m *StatusResponse) contextValidateDatapathMode(ctx context.Context, formats strfmt.Registry) error {
if swag.IsZero(m.DatapathMode) { // not required
return nil
}
if err := m.DatapathMode.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("datapath-mode")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("datapath-mode")
}
return err
}
return nil
}
func (m *StatusResponse) contextValidateEncryption(ctx context.Context, formats strfmt.Registry) error {
if m.Encryption != nil {
if swag.IsZero(m.Encryption) { // not required
return nil
}
if err := m.Encryption.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("encryption")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("encryption")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateHostFirewall(ctx context.Context, formats strfmt.Registry) error {
if m.HostFirewall != nil {
if swag.IsZero(m.HostFirewall) { // not required
return nil
}
if err := m.HostFirewall.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("host-firewall")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("host-firewall")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateHubble(ctx context.Context, formats strfmt.Registry) error {
if m.Hubble != nil {
if swag.IsZero(m.Hubble) { // not required
return nil
}
if err := m.Hubble.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("hubble")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("hubble")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateIdentityRange(ctx context.Context, formats strfmt.Registry) error {
if m.IdentityRange != nil {
if swag.IsZero(m.IdentityRange) { // not required
return nil
}
if err := m.IdentityRange.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("identity-range")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("identity-range")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateIpam(ctx context.Context, formats strfmt.Registry) error {
if m.Ipam != nil {
if swag.IsZero(m.Ipam) { // not required
return nil
}
if err := m.Ipam.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipam")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipam")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateIPV4BigTCP(ctx context.Context, formats strfmt.Registry) error {
if m.IPV4BigTCP != nil {
if swag.IsZero(m.IPV4BigTCP) { // not required
return nil
}
if err := m.IPV4BigTCP.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv4-big-tcp")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv4-big-tcp")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateIPV6BigTCP(ctx context.Context, formats strfmt.Registry) error {
if m.IPV6BigTCP != nil {
if swag.IsZero(m.IPV6BigTCP) { // not required
return nil
}
if err := m.IPV6BigTCP.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("ipv6-big-tcp")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("ipv6-big-tcp")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateKubeProxyReplacement(ctx context.Context, formats strfmt.Registry) error {
if m.KubeProxyReplacement != nil {
if swag.IsZero(m.KubeProxyReplacement) { // not required
return nil
}
if err := m.KubeProxyReplacement.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("kube-proxy-replacement")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("kube-proxy-replacement")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateKubernetes(ctx context.Context, formats strfmt.Registry) error {
if m.Kubernetes != nil {
if swag.IsZero(m.Kubernetes) { // not required
return nil
}
if err := m.Kubernetes.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("kubernetes")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("kubernetes")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateKvstore(ctx context.Context, formats strfmt.Registry) error {
if m.Kvstore != nil {
if swag.IsZero(m.Kvstore) { // not required
return nil
}
if err := m.Kvstore.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("kvstore")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("kvstore")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateMasquerading(ctx context.Context, formats strfmt.Registry) error {
if m.Masquerading != nil {
if swag.IsZero(m.Masquerading) { // not required
return nil
}
if err := m.Masquerading.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("masquerading")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("masquerading")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateNodeMonitor(ctx context.Context, formats strfmt.Registry) error {
if m.NodeMonitor != nil {
if swag.IsZero(m.NodeMonitor) { // not required
return nil
}
if err := m.NodeMonitor.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("nodeMonitor")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("nodeMonitor")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateProxy(ctx context.Context, formats strfmt.Registry) error {
if m.Proxy != nil {
if swag.IsZero(m.Proxy) { // not required
return nil
}
if err := m.Proxy.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("proxy")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("proxy")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateRouting(ctx context.Context, formats strfmt.Registry) error {
if m.Routing != nil {
if swag.IsZero(m.Routing) { // not required
return nil
}
if err := m.Routing.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("routing")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("routing")
}
return err
}
}
return nil
}
func (m *StatusResponse) contextValidateSrv6(ctx context.Context, formats strfmt.Registry) error {
if m.Srv6 != nil {
if swag.IsZero(m.Srv6) { // not required
return nil
}
if err := m.Srv6.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("srv6")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("srv6")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *StatusResponse) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *StatusResponse) UnmarshalBinary(b []byte) error {
var res StatusResponse
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
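// exampleStatusResponseValidate is not generated code; it is a sketch of how a
// consumer of the daemon API might assemble a StatusResponse and run the
// composite validators. The concrete values (client ID, stale entry) are
// illustrative assumptions, not output of a real Cilium daemon.
func exampleStatusResponseValidate(ctx context.Context) error {
	sr := &StatusResponse{
		ClientID: 1,
		Stale: map[string]strfmt.DateTime{
			// Keys are arbitrary component names; every value must satisfy the
			// "date-time" format checked by validateStale above.
			"kvstore": strfmt.NewDateTime(),
		},
	}
	// Validate walks every nested status struct, enum and formatted field and
	// aggregates failures into a single errors.CompositeValidationError.
	if err := sr.Validate(strfmt.Default); err != nil {
		return err
	}
	return sr.ContextValidate(ctx, strfmt.Default)
}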
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// TraceFrom trace from
//
// swagger:model TraceFrom
type TraceFrom struct {
// labels
Labels Labels `json:"labels,omitempty"`
}
// Validate validates this trace from
func (m *TraceFrom) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateLabels(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *TraceFrom) validateLabels(formats strfmt.Registry) error {
if swag.IsZero(m.Labels) { // not required
return nil
}
if err := m.Labels.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
return nil
}
// ContextValidate validate this trace from based on the context it is used
func (m *TraceFrom) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateLabels(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *TraceFrom) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error {
if err := m.Labels.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *TraceFrom) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *TraceFrom) UnmarshalBinary(b []byte) error {
var res TraceFrom
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// TraceSelector Context describing a pair of source and destination identity
//
// swagger:model TraceSelector
type TraceSelector struct {
// from
From *TraceFrom `json:"from,omitempty"`
// to
To *TraceTo `json:"to,omitempty"`
// Enable verbose tracing.
//
Verbose bool `json:"verbose,omitempty"`
}
// Validate validates this trace selector
func (m *TraceSelector) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateFrom(formats); err != nil {
res = append(res, err)
}
if err := m.validateTo(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *TraceSelector) validateFrom(formats strfmt.Registry) error {
if swag.IsZero(m.From) { // not required
return nil
}
if m.From != nil {
if err := m.From.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("from")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("from")
}
return err
}
}
return nil
}
func (m *TraceSelector) validateTo(formats strfmt.Registry) error {
if swag.IsZero(m.To) { // not required
return nil
}
if m.To != nil {
if err := m.To.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("to")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("to")
}
return err
}
}
return nil
}
// ContextValidate validate this trace selector based on the context it is used
func (m *TraceSelector) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateFrom(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateTo(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *TraceSelector) contextValidateFrom(ctx context.Context, formats strfmt.Registry) error {
if m.From != nil {
if swag.IsZero(m.From) { // not required
return nil
}
if err := m.From.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("from")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("from")
}
return err
}
}
return nil
}
func (m *TraceSelector) contextValidateTo(ctx context.Context, formats strfmt.Registry) error {
if m.To != nil {
if swag.IsZero(m.To) { // not required
return nil
}
if err := m.To.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("to")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("to")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *TraceSelector) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *TraceSelector) UnmarshalBinary(b []byte) error {
var res TraceSelector
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
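// exampleTraceSelector is not generated code; it sketches how a policy trace
// request might wire the selector models together. The label strings are
// illustrative assumptions and presume that Labels is a string slice, as defined
// elsewhere in this package.
func exampleTraceSelector() (*TraceSelector, error) {
	sel := &TraceSelector{
		From:    &TraceFrom{Labels: Labels{"k8s:app=frontend"}},
		To:      &TraceTo{Labels: Labels{"k8s:app=backend"}},
		Verbose: true,
	}
	if err := sel.Validate(strfmt.Default); err != nil {
		return nil, err
	}
	return sel, nil
}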
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// TraceTo trace to
//
// swagger:model TraceTo
type TraceTo struct {
// List of Layer 4 port and protocol pairs which will be used in communication
// from the source identity to the destination identity.
//
Dports []*Port `json:"dports"`
// labels
Labels Labels `json:"labels,omitempty"`
}
// Validate validates this trace to
func (m *TraceTo) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateDports(formats); err != nil {
res = append(res, err)
}
if err := m.validateLabels(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *TraceTo) validateDports(formats strfmt.Registry) error {
if swag.IsZero(m.Dports) { // not required
return nil
}
for i := 0; i < len(m.Dports); i++ {
if swag.IsZero(m.Dports[i]) { // not required
continue
}
if m.Dports[i] != nil {
if err := m.Dports[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("dports" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("dports" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *TraceTo) validateLabels(formats strfmt.Registry) error {
if swag.IsZero(m.Labels) { // not required
return nil
}
if err := m.Labels.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
return nil
}
// ContextValidate validate this trace to based on the context it is used
func (m *TraceTo) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateDports(ctx, formats); err != nil {
res = append(res, err)
}
if err := m.contextValidateLabels(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *TraceTo) contextValidateDports(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Dports); i++ {
if m.Dports[i] != nil {
if swag.IsZero(m.Dports[i]) { // not required
continue
}
if err := m.Dports[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("dports" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("dports" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *TraceTo) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error {
if err := m.Labels.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("labels")
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("labels")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *TraceTo) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *TraceTo) UnmarshalBinary(b []byte) error {
var res TraceTo
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
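// exampleTraceToDports is not generated code; it sketches how destination ports
// are attached to a trace target and relies on two behaviours of the generated
// validators above: nil entries in Dports are skipped, and per-element failures
// are reported under "dports.<index>". The zero-valued Port is an illustrative
// assumption.
func exampleTraceToDports() error {
	tt := &TraceTo{
		Dports: []*Port{nil, {}},
	}
	return tt.Validate(strfmt.Default)
}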
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// WireguardInterface Status of a WireGuard interface
//
// +k8s:deepcopy-gen=true
//
// swagger:model WireguardInterface
type WireguardInterface struct {
// Port on which the WireGuard endpoint is exposed
ListenPort int64 `json:"listen-port,omitempty"`
// Name of the interface
Name string `json:"name,omitempty"`
// Number of peers configured on this interface
PeerCount int64 `json:"peer-count,omitempty"`
// Optional list of WireGuard peers
Peers []*WireguardPeer `json:"peers"`
// Public key of this interface
PublicKey string `json:"public-key,omitempty"`
}
// Validate validates this wireguard interface
func (m *WireguardInterface) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validatePeers(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *WireguardInterface) validatePeers(formats strfmt.Registry) error {
if swag.IsZero(m.Peers) { // not required
return nil
}
for i := 0; i < len(m.Peers); i++ {
if swag.IsZero(m.Peers[i]) { // not required
continue
}
if m.Peers[i] != nil {
if err := m.Peers[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("peers" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("peers" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this wireguard interface based on the context it is used
func (m *WireguardInterface) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidatePeers(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *WireguardInterface) contextValidatePeers(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Peers); i++ {
if m.Peers[i] != nil {
if swag.IsZero(m.Peers[i]) { // not required
continue
}
if err := m.Peers[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("peers" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("peers" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *WireguardInterface) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *WireguardInterface) UnmarshalBinary(b []byte) error {
var res WireguardInterface
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
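// exampleWireguardInterface is not generated code; it is a sketch of how agent
// status reporting might populate a WireguardInterface with one peer. All
// concrete values (interface name, port, key, endpoint) are illustrative
// assumptions.
func exampleWireguardInterface() (*WireguardInterface, error) {
	iface := &WireguardInterface{
		Name:       "cilium_wg0",
		ListenPort: 51871,
		PeerCount:  1,
		Peers: []*WireguardPeer{
			{
				PublicKey:         "base64-encoded-public-key",
				Endpoint:          "192.0.2.10:51871",
				LastHandshakeTime: strfmt.NewDateTime(),
			},
		},
	}
	if err := iface.Validate(strfmt.Default); err != nil {
		return nil, err
	}
	return iface, nil
}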
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// WireguardPeer Status of a WireGuard peer
//
// +k8s:deepcopy-gen=true
//
// swagger:model WireguardPeer
type WireguardPeer struct {
// List of IPs which may be routed through this peer
AllowedIps []string `json:"allowed-ips"`
// Endpoint on which we are connected to this peer
Endpoint string `json:"endpoint,omitempty"`
// Timestamp of the last handshake with this peer
// Format: date-time
LastHandshakeTime strfmt.DateTime `json:"last-handshake-time,omitempty"`
// Public key of this peer
PublicKey string `json:"public-key,omitempty"`
// Number of received bytes
TransferRx int64 `json:"transfer-rx,omitempty"`
// Number of sent bytes
TransferTx int64 `json:"transfer-tx,omitempty"`
}
// Validate validates this wireguard peer
func (m *WireguardPeer) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateLastHandshakeTime(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *WireguardPeer) validateLastHandshakeTime(formats strfmt.Registry) error {
if swag.IsZero(m.LastHandshakeTime) { // not required
return nil
}
if err := validate.FormatOf("last-handshake-time", "body", "date-time", m.LastHandshakeTime.String(), formats); err != nil {
return err
}
return nil
}
// ContextValidate validates this wireguard peer based on context it is used
func (m *WireguardPeer) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *WireguardPeer) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *WireguardPeer) UnmarshalBinary(b []byte) error {
var res WireguardPeer
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
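// exampleWireguardPeer is not generated code; it sketches the only validation
// this model performs: LastHandshakeTime must satisfy the "date-time" format in
// the supplied registry. The addresses and byte counters are illustrative
// assumptions.
func exampleWireguardPeer() error {
	peer := &WireguardPeer{
		AllowedIps:        []string{"10.0.0.0/8"},
		LastHandshakeTime: strfmt.NewDateTime(),
		TransferRx:        1024,
		TransferTx:        2048,
	}
	return peer.Validate(strfmt.Default)
}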
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"strconv"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// WireguardStatus Status of the WireGuard agent
//
// +k8s:deepcopy-gen=true
//
// swagger:model WireguardStatus
type WireguardStatus struct {
// WireGuard interfaces managed by this Cilium instance
Interfaces []*WireguardInterface `json:"interfaces"`
// Node Encryption status
NodeEncryption string `json:"node-encryption,omitempty"`
}
// Validate validates this wireguard status
func (m *WireguardStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateInterfaces(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *WireguardStatus) validateInterfaces(formats strfmt.Registry) error {
if swag.IsZero(m.Interfaces) { // not required
return nil
}
for i := 0; i < len(m.Interfaces); i++ {
if swag.IsZero(m.Interfaces[i]) { // not required
continue
}
if m.Interfaces[i] != nil {
if err := m.Interfaces[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("interfaces" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("interfaces" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// ContextValidate validate this wireguard status based on the context it is used
func (m *WireguardStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
if err := m.contextValidateInterfaces(ctx, formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *WireguardStatus) contextValidateInterfaces(ctx context.Context, formats strfmt.Registry) error {
for i := 0; i < len(m.Interfaces); i++ {
if m.Interfaces[i] != nil {
if swag.IsZero(m.Interfaces[i]) { // not required
continue
}
if err := m.Interfaces[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("interfaces" + "." + strconv.Itoa(i))
} else if ce, ok := err.(*errors.CompositeError); ok {
return ce.ValidateName("interfaces" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *WireguardStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *WireguardStatus) UnmarshalBinary(b []byte) error {
var res WireguardStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
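// exampleWireguardStatus is not generated code; it sketches how per-interface
// status might be aggregated into the agent-level WireguardStatus. The
// node-encryption string is an illustrative assumption.
func exampleWireguardStatus(ifaces []*WireguardInterface) (*WireguardStatus, error) {
	ws := &WireguardStatus{
		Interfaces:     ifaces,
		NodeEncryption: "Disabled",
	}
	if err := ws.Validate(strfmt.Default); err != nil {
		return nil, err
	}
	return ws, nil
}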
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package models
import (
strfmt "github.com/go-openapi/strfmt"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BPFMapStatus) DeepCopyInto(out *BPFMapStatus) {
*out = *in
if in.Maps != nil {
in, out := &in.Maps, &out.Maps
*out = make([]*BPFMapProperties, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(BPFMapProperties)
**out = **in
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BPFMapStatus.
func (in *BPFMapStatus) DeepCopy() *BPFMapStatus {
if in == nil {
return nil
}
out := new(BPFMapStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BandwidthManager) DeepCopyInto(out *BandwidthManager) {
*out = *in
if in.Devices != nil {
in, out := &in.Devices, &out.Devices
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BandwidthManager.
func (in *BandwidthManager) DeepCopy() *BandwidthManager {
if in == nil {
return nil
}
out := new(BandwidthManager)
in.DeepCopyInto(out)
return out
}
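// exampleBandwidthManagerDeepCopy is not generated code; it is a sketch of the
// deep-copy contract: the copy owns its own Devices slice, so mutating the copy
// leaves the original untouched. The device name is an illustrative assumption.
func exampleBandwidthManagerDeepCopy() bool {
	orig := &BandwidthManager{Devices: []string{"eth0"}}
	cp := orig.DeepCopy()
	cp.Devices[0] = "eth1"
	// The two slices do not alias each other, so the original still reads "eth0".
	return orig.Devices[0] == "eth0"
}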
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BgpPeer) DeepCopyInto(out *BgpPeer) {
*out = *in
if in.Families != nil {
in, out := &in.Families, &out.Families
*out = make([]*BgpPeerFamilies, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(BgpPeerFamilies)
**out = **in
}
}
}
if in.GracefulRestart != nil {
in, out := &in.GracefulRestart, &out.GracefulRestart
*out = new(BgpGracefulRestart)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BgpPeer.
func (in *BgpPeer) DeepCopy() *BgpPeer {
if in == nil {
return nil
}
out := new(BgpPeer)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterMeshStatus) DeepCopyInto(out *ClusterMeshStatus) {
*out = *in
if in.Clusters != nil {
in, out := &in.Clusters, &out.Clusters
*out = make([]*RemoteCluster, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(RemoteCluster)
(*in).DeepCopyInto(*out)
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMeshStatus.
func (in *ClusterMeshStatus) DeepCopy() *ClusterMeshStatus {
if in == nil {
return nil
}
out := new(ClusterMeshStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
*out = *in
if in.CiliumHealth != nil {
in, out := &in.CiliumHealth, &out.CiliumHealth
*out = new(Status)
**out = **in
}
if in.Nodes != nil {
in, out := &in.Nodes, &out.Nodes
*out = make([]*NodeElement, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(NodeElement)
(*in).DeepCopyInto(*out)
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus.
func (in *ClusterStatus) DeepCopy() *ClusterStatus {
if in == nil {
return nil
}
out := new(ClusterStatus)
in.DeepCopyInto(out)
return out
}
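// exampleClusterStatusDeepCopy is not generated code; it sketches that nested
// pointers such as CiliumHealth are allocated anew by DeepCopy, so the copy never
// aliases the original. The empty Status value is an illustrative assumption.
func exampleClusterStatusDeepCopy() bool {
	orig := &ClusterStatus{CiliumHealth: &Status{}}
	cp := orig.DeepCopy()
	// Distinct pointers prove the nested Status was copied, not shared.
	return cp.CiliumHealth != orig.CiliumHealth
}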
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerStatus) DeepCopyInto(out *ControllerStatus) {
*out = *in
if in.Configuration != nil {
in, out := &in.Configuration, &out.Configuration
*out = new(ControllerStatusConfiguration)
**out = **in
}
if in.Status != nil {
in, out := &in.Status, &out.Status
*out = new(ControllerStatusStatus)
(*in).DeepCopyInto(*out)
}
in.UUID.DeepCopyInto(&out.UUID)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerStatus.
func (in *ControllerStatus) DeepCopy() *ControllerStatus {
if in == nil {
return nil
}
out := new(ControllerStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerStatusConfiguration) DeepCopyInto(out *ControllerStatusConfiguration) {
*out = *in
in.ErrorRetryBase.DeepCopyInto(&out.ErrorRetryBase)
in.Interval.DeepCopyInto(&out.Interval)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerStatusConfiguration.
func (in *ControllerStatusConfiguration) DeepCopy() *ControllerStatusConfiguration {
if in == nil {
return nil
}
out := new(ControllerStatusConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerStatusStatus) DeepCopyInto(out *ControllerStatusStatus) {
*out = *in
in.LastFailureTimestamp.DeepCopyInto(&out.LastFailureTimestamp)
in.LastSuccessTimestamp.DeepCopyInto(&out.LastSuccessTimestamp)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerStatusStatus.
func (in *ControllerStatusStatus) DeepCopy() *ControllerStatusStatus {
if in == nil {
return nil
}
out := new(ControllerStatusStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EncryptionStatus) DeepCopyInto(out *EncryptionStatus) {
*out = *in
if in.Ipsec != nil {
in, out := &in.Ipsec, &out.Ipsec
*out = new(IPsecStatus)
(*in).DeepCopyInto(*out)
}
if in.Wireguard != nil {
in, out := &in.Wireguard, &out.Wireguard
*out = new(WireguardStatus)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionStatus.
func (in *EncryptionStatus) DeepCopy() *EncryptionStatus {
if in == nil {
return nil
}
out := new(EncryptionStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostFirewall) DeepCopyInto(out *HostFirewall) {
*out = *in
if in.Devices != nil {
in, out := &in.Devices, &out.Devices
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostFirewall.
func (in *HostFirewall) DeepCopy() *HostFirewall {
if in == nil {
return nil
}
out := new(HostFirewall)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HubbleStatus) DeepCopyInto(out *HubbleStatus) {
*out = *in
if in.Metrics != nil {
in, out := &in.Metrics, &out.Metrics
*out = new(HubbleStatusMetrics)
**out = **in
}
if in.Observer != nil {
in, out := &in.Observer, &out.Observer
*out = new(HubbleStatusObserver)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubbleStatus.
func (in *HubbleStatus) DeepCopy() *HubbleStatus {
if in == nil {
return nil
}
out := new(HubbleStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HubbleStatusObserver) DeepCopyInto(out *HubbleStatusObserver) {
*out = *in
in.Uptime.DeepCopyInto(&out.Uptime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubbleStatusObserver.
func (in *HubbleStatusObserver) DeepCopy() *HubbleStatusObserver {
if in == nil {
return nil
}
out := new(HubbleStatusObserver)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAMStatus) DeepCopyInto(out *IPAMStatus) {
*out = *in
if in.Allocations != nil {
in, out := &in.Allocations, &out.Allocations
*out = make(AllocationMap, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.IPV4 != nil {
in, out := &in.IPV4, &out.IPV4
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.IPV6 != nil {
in, out := &in.IPV6, &out.IPV6
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMStatus.
func (in *IPAMStatus) DeepCopy() *IPAMStatus {
if in == nil {
return nil
}
out := new(IPAMStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPsecStatus) DeepCopyInto(out *IPsecStatus) {
*out = *in
if in.DecryptInterfaces != nil {
in, out := &in.DecryptInterfaces, &out.DecryptInterfaces
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.XfrmErrors != nil {
in, out := &in.XfrmErrors, &out.XfrmErrors
*out = make(map[string]int64, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPsecStatus.
func (in *IPsecStatus) DeepCopy() *IPsecStatus {
if in == nil {
return nil
}
out := new(IPsecStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *K8sStatus) DeepCopyInto(out *K8sStatus) {
*out = *in
if in.K8sAPIVersions != nil {
in, out := &in.K8sAPIVersions, &out.K8sAPIVersions
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sStatus.
func (in *K8sStatus) DeepCopy() *K8sStatus {
if in == nil {
return nil
}
out := new(K8sStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyReplacement) DeepCopyInto(out *KubeProxyReplacement) {
*out = *in
if in.DeviceList != nil {
in, out := &in.DeviceList, &out.DeviceList
*out = make([]*KubeProxyReplacementDeviceListItems0, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(KubeProxyReplacementDeviceListItems0)
(*in).DeepCopyInto(*out)
}
}
}
if in.Devices != nil {
in, out := &in.Devices, &out.Devices
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Features != nil {
in, out := &in.Features, &out.Features
*out = new(KubeProxyReplacementFeatures)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyReplacement.
func (in *KubeProxyReplacement) DeepCopy() *KubeProxyReplacement {
if in == nil {
return nil
}
out := new(KubeProxyReplacement)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyReplacementDeviceListItems0) DeepCopyInto(out *KubeProxyReplacementDeviceListItems0) {
*out = *in
if in.IP != nil {
in, out := &in.IP, &out.IP
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyReplacementDeviceListItems0.
func (in *KubeProxyReplacementDeviceListItems0) DeepCopy() *KubeProxyReplacementDeviceListItems0 {
if in == nil {
return nil
}
out := new(KubeProxyReplacementDeviceListItems0)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyReplacementFeatures) DeepCopyInto(out *KubeProxyReplacementFeatures) {
*out = *in
if in.Annotations != nil {
in, out := &in.Annotations, &out.Annotations
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ExternalIPs != nil {
in, out := &in.ExternalIPs, &out.ExternalIPs
*out = new(KubeProxyReplacementFeaturesExternalIPs)
**out = **in
}
if in.GracefulTermination != nil {
in, out := &in.GracefulTermination, &out.GracefulTermination
*out = new(KubeProxyReplacementFeaturesGracefulTermination)
**out = **in
}
if in.HostPort != nil {
in, out := &in.HostPort, &out.HostPort
*out = new(KubeProxyReplacementFeaturesHostPort)
**out = **in
}
if in.HostReachableServices != nil {
in, out := &in.HostReachableServices, &out.HostReachableServices
*out = new(KubeProxyReplacementFeaturesHostReachableServices)
(*in).DeepCopyInto(*out)
}
if in.Nat46X64 != nil {
in, out := &in.Nat46X64, &out.Nat46X64
*out = new(KubeProxyReplacementFeaturesNat46X64)
(*in).DeepCopyInto(*out)
}
if in.NodePort != nil {
in, out := &in.NodePort, &out.NodePort
*out = new(KubeProxyReplacementFeaturesNodePort)
**out = **in
}
if in.SessionAffinity != nil {
in, out := &in.SessionAffinity, &out.SessionAffinity
*out = new(KubeProxyReplacementFeaturesSessionAffinity)
**out = **in
}
if in.SocketLB != nil {
in, out := &in.SocketLB, &out.SocketLB
*out = new(KubeProxyReplacementFeaturesSocketLB)
**out = **in
}
if in.SocketLBTracing != nil {
in, out := &in.SocketLBTracing, &out.SocketLBTracing
*out = new(KubeProxyReplacementFeaturesSocketLBTracing)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyReplacementFeatures.
func (in *KubeProxyReplacementFeatures) DeepCopy() *KubeProxyReplacementFeatures {
if in == nil {
return nil
}
out := new(KubeProxyReplacementFeatures)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyReplacementFeaturesHostReachableServices) DeepCopyInto(out *KubeProxyReplacementFeaturesHostReachableServices) {
*out = *in
if in.Protocols != nil {
in, out := &in.Protocols, &out.Protocols
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyReplacementFeaturesHostReachableServices.
func (in *KubeProxyReplacementFeaturesHostReachableServices) DeepCopy() *KubeProxyReplacementFeaturesHostReachableServices {
if in == nil {
return nil
}
out := new(KubeProxyReplacementFeaturesHostReachableServices)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyReplacementFeaturesNat46X64) DeepCopyInto(out *KubeProxyReplacementFeaturesNat46X64) {
*out = *in
if in.Gateway != nil {
in, out := &in.Gateway, &out.Gateway
*out = new(KubeProxyReplacementFeaturesNat46X64Gateway)
(*in).DeepCopyInto(*out)
}
if in.Service != nil {
in, out := &in.Service, &out.Service
*out = new(KubeProxyReplacementFeaturesNat46X64Service)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyReplacementFeaturesNat46X64.
func (in *KubeProxyReplacementFeaturesNat46X64) DeepCopy() *KubeProxyReplacementFeaturesNat46X64 {
if in == nil {
return nil
}
out := new(KubeProxyReplacementFeaturesNat46X64)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeProxyReplacementFeaturesNat46X64Gateway) DeepCopyInto(out *KubeProxyReplacementFeaturesNat46X64Gateway) {
*out = *in
if in.Prefixes != nil {
in, out := &in.Prefixes, &out.Prefixes
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyReplacementFeaturesNat46X64Gateway.
func (in *KubeProxyReplacementFeaturesNat46X64Gateway) DeepCopy() *KubeProxyReplacementFeaturesNat46X64Gateway {
if in == nil {
return nil
}
out := new(KubeProxyReplacementFeaturesNat46X64Gateway)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Masquerading) DeepCopyInto(out *Masquerading) {
*out = *in
if in.EnabledProtocols != nil {
in, out := &in.EnabledProtocols, &out.EnabledProtocols
*out = new(MasqueradingEnabledProtocols)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Masquerading.
func (in *Masquerading) DeepCopy() *Masquerading {
if in == nil {
return nil
}
out := new(Masquerading)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in NamedPorts) DeepCopyInto(out *NamedPorts) {
{
in := &in
*out = make(NamedPorts, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(Port)
**out = **in
}
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedPorts.
func (in NamedPorts) DeepCopy() NamedPorts {
if in == nil {
return nil
}
out := new(NamedPorts)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeAddressing) DeepCopyInto(out *NodeAddressing) {
*out = *in
if in.IPV4 != nil {
in, out := &in.IPV4, &out.IPV4
*out = new(NodeAddressingElement)
**out = **in
}
if in.IPV6 != nil {
in, out := &in.IPV6, &out.IPV6
*out = new(NodeAddressingElement)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddressing.
func (in *NodeAddressing) DeepCopy() *NodeAddressing {
if in == nil {
return nil
}
out := new(NodeAddressing)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeElement) DeepCopyInto(out *NodeElement) {
*out = *in
if in.HealthEndpointAddress != nil {
in, out := &in.HealthEndpointAddress, &out.HealthEndpointAddress
*out = new(NodeAddressing)
(*in).DeepCopyInto(*out)
}
if in.IngressAddress != nil {
in, out := &in.IngressAddress, &out.IngressAddress
*out = new(NodeAddressing)
(*in).DeepCopyInto(*out)
}
if in.PrimaryAddress != nil {
in, out := &in.PrimaryAddress, &out.PrimaryAddress
*out = new(NodeAddressing)
(*in).DeepCopyInto(*out)
}
if in.SecondaryAddresses != nil {
in, out := &in.SecondaryAddresses, &out.SecondaryAddresses
*out = make([]*NodeAddressingElement, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(NodeAddressingElement)
**out = **in
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeElement.
func (in *NodeElement) DeepCopy() *NodeElement {
if in == nil {
return nil
}
out := new(NodeElement)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProxyStatistics) DeepCopyInto(out *ProxyStatistics) {
*out = *in
if in.Statistics != nil {
in, out := &in.Statistics, &out.Statistics
*out = new(RequestResponseStatistics)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyStatistics.
func (in *ProxyStatistics) DeepCopy() *ProxyStatistics {
if in == nil {
return nil
}
out := new(ProxyStatistics)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProxyStatus) DeepCopyInto(out *ProxyStatus) {
*out = *in
if in.Redirects != nil {
in, out := &in.Redirects, &out.Redirects
*out = make([]*ProxyRedirect, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(ProxyRedirect)
**out = **in
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyStatus.
func (in *ProxyStatus) DeepCopy() *ProxyStatus {
if in == nil {
return nil
}
out := new(ProxyStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemoteCluster) DeepCopyInto(out *RemoteCluster) {
*out = *in
if in.Config != nil {
in, out := &in.Config, &out.Config
*out = new(RemoteClusterConfig)
(*in).DeepCopyInto(*out)
}
in.LastFailure.DeepCopyInto(&out.LastFailure)
if in.Synced != nil {
in, out := &in.Synced, &out.Synced
*out = new(RemoteClusterSynced)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteCluster.
func (in *RemoteCluster) DeepCopy() *RemoteCluster {
if in == nil {
return nil
}
out := new(RemoteCluster)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemoteClusterConfig) DeepCopyInto(out *RemoteClusterConfig) {
*out = *in
if in.ServiceExportsEnabled != nil {
in, out := &in.ServiceExportsEnabled, &out.ServiceExportsEnabled
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteClusterConfig.
func (in *RemoteClusterConfig) DeepCopy() *RemoteClusterConfig {
if in == nil {
return nil
}
out := new(RemoteClusterConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemoteClusterSynced) DeepCopyInto(out *RemoteClusterSynced) {
*out = *in
if in.ServiceExports != nil {
in, out := &in.ServiceExports, &out.ServiceExports
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteClusterSynced.
func (in *RemoteClusterSynced) DeepCopy() *RemoteClusterSynced {
if in == nil {
return nil
}
out := new(RemoteClusterSynced)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RequestResponseStatistics) DeepCopyInto(out *RequestResponseStatistics) {
*out = *in
if in.Requests != nil {
in, out := &in.Requests, &out.Requests
*out = new(MessageForwardingStatistics)
**out = **in
}
if in.Responses != nil {
in, out := &in.Responses, &out.Responses
*out = new(MessageForwardingStatistics)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestResponseStatistics.
func (in *RequestResponseStatistics) DeepCopy() *RequestResponseStatistics {
if in == nil {
return nil
}
out := new(RequestResponseStatistics)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatusResponse) DeepCopyInto(out *StatusResponse) {
*out = *in
if in.AuthCertificateProvider != nil {
in, out := &in.AuthCertificateProvider, &out.AuthCertificateProvider
*out = new(Status)
**out = **in
}
if in.BandwidthManager != nil {
in, out := &in.BandwidthManager, &out.BandwidthManager
*out = new(BandwidthManager)
(*in).DeepCopyInto(*out)
}
if in.BpfMaps != nil {
in, out := &in.BpfMaps, &out.BpfMaps
*out = new(BPFMapStatus)
(*in).DeepCopyInto(*out)
}
if in.Cilium != nil {
in, out := &in.Cilium, &out.Cilium
*out = new(Status)
**out = **in
}
if in.ClockSource != nil {
in, out := &in.ClockSource, &out.ClockSource
*out = new(ClockSource)
**out = **in
}
if in.Cluster != nil {
in, out := &in.Cluster, &out.Cluster
*out = new(ClusterStatus)
(*in).DeepCopyInto(*out)
}
if in.ClusterMesh != nil {
in, out := &in.ClusterMesh, &out.ClusterMesh
*out = new(ClusterMeshStatus)
(*in).DeepCopyInto(*out)
}
if in.CniChaining != nil {
in, out := &in.CniChaining, &out.CniChaining
*out = new(CNIChainingStatus)
**out = **in
}
if in.CniFile != nil {
in, out := &in.CniFile, &out.CniFile
*out = new(Status)
**out = **in
}
if in.ContainerRuntime != nil {
in, out := &in.ContainerRuntime, &out.ContainerRuntime
*out = new(Status)
**out = **in
}
if in.Controllers != nil {
in, out := &in.Controllers, &out.Controllers
*out = make(ControllerStatuses, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(ControllerStatus)
(*in).DeepCopyInto(*out)
}
}
}
if in.Encryption != nil {
in, out := &in.Encryption, &out.Encryption
*out = new(EncryptionStatus)
(*in).DeepCopyInto(*out)
}
if in.HostFirewall != nil {
in, out := &in.HostFirewall, &out.HostFirewall
*out = new(HostFirewall)
(*in).DeepCopyInto(*out)
}
if in.Hubble != nil {
in, out := &in.Hubble, &out.Hubble
*out = new(HubbleStatus)
(*in).DeepCopyInto(*out)
}
if in.IdentityRange != nil {
in, out := &in.IdentityRange, &out.IdentityRange
*out = new(IdentityRange)
**out = **in
}
if in.Ipam != nil {
in, out := &in.Ipam, &out.Ipam
*out = new(IPAMStatus)
(*in).DeepCopyInto(*out)
}
if in.IPV4BigTCP != nil {
in, out := &in.IPV4BigTCP, &out.IPV4BigTCP
*out = new(IPV4BigTCP)
**out = **in
}
if in.IPV6BigTCP != nil {
in, out := &in.IPV6BigTCP, &out.IPV6BigTCP
*out = new(IPV6BigTCP)
**out = **in
}
if in.KubeProxyReplacement != nil {
in, out := &in.KubeProxyReplacement, &out.KubeProxyReplacement
*out = new(KubeProxyReplacement)
(*in).DeepCopyInto(*out)
}
if in.Kubernetes != nil {
in, out := &in.Kubernetes, &out.Kubernetes
*out = new(K8sStatus)
(*in).DeepCopyInto(*out)
}
if in.Kvstore != nil {
in, out := &in.Kvstore, &out.Kvstore
*out = new(Status)
**out = **in
}
if in.Masquerading != nil {
in, out := &in.Masquerading, &out.Masquerading
*out = new(Masquerading)
(*in).DeepCopyInto(*out)
}
if in.NodeMonitor != nil {
in, out := &in.NodeMonitor, &out.NodeMonitor
*out = new(MonitorStatus)
**out = **in
}
if in.Proxy != nil {
in, out := &in.Proxy, &out.Proxy
*out = new(ProxyStatus)
(*in).DeepCopyInto(*out)
}
if in.Routing != nil {
in, out := &in.Routing, &out.Routing
*out = new(Routing)
**out = **in
}
if in.Srv6 != nil {
in, out := &in.Srv6, &out.Srv6
*out = new(Srv6)
**out = **in
}
if in.Stale != nil {
in, out := &in.Stale, &out.Stale
*out = make(map[string]strfmt.DateTime, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusResponse.
func (in *StatusResponse) DeepCopy() *StatusResponse {
if in == nil {
return nil
}
out := new(StatusResponse)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WireguardInterface) DeepCopyInto(out *WireguardInterface) {
*out = *in
if in.Peers != nil {
in, out := &in.Peers, &out.Peers
*out = make([]*WireguardPeer, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(WireguardPeer)
(*in).DeepCopyInto(*out)
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WireguardInterface.
func (in *WireguardInterface) DeepCopy() *WireguardInterface {
if in == nil {
return nil
}
out := new(WireguardInterface)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WireguardPeer) DeepCopyInto(out *WireguardPeer) {
*out = *in
if in.AllowedIps != nil {
in, out := &in.AllowedIps, &out.AllowedIps
*out = make([]string, len(*in))
copy(*out, *in)
}
in.LastHandshakeTime.DeepCopyInto(&out.LastHandshakeTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WireguardPeer.
func (in *WireguardPeer) DeepCopy() *WireguardPeer {
if in == nil {
return nil
}
out := new(WireguardPeer)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WireguardStatus) DeepCopyInto(out *WireguardStatus) {
*out = *in
if in.Interfaces != nil {
in, out := &in.Interfaces, &out.Interfaces
*out = make([]*WireguardInterface, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(WireguardInterface)
(*in).DeepCopyInto(*out)
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WireguardStatus.
func (in *WireguardStatus) DeepCopy() *WireguardStatus {
if in == nil {
return nil
}
out := new(WireguardStatus)
in.DeepCopyInto(out)
return out
}
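// exampleDeepCopy is an illustrative sketch added for this document (not part
// of the generated code): it shows that DeepCopy duplicates nested reference
// fields, so mutating the copy does not affect the original.
func exampleDeepCopy() bool {
	orig := &HostFirewall{Devices: []string{"eth0"}}
	cp := orig.DeepCopy()
	cp.Devices[0] = "eth1"
	// The copy received its own Devices slice, so the original is untouched.
	return orig.Devices[0] == "eth0" // true
}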
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package models
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ControllerStatusConfiguration) DeepEqual(other *ControllerStatusConfiguration) bool {
if other == nil {
return false
}
if in.ErrorRetry != other.ErrorRetry {
return false
}
if in.ErrorRetryBase != other.ErrorRetryBase {
return false
}
if in.Interval != other.Interval {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointHealth) DeepEqual(other *EndpointHealth) bool {
if other == nil {
return false
}
if in.Bpf != other.Bpf {
return false
}
if in.Connected != other.Connected {
return false
}
if in.OverallHealth != other.OverallHealth {
return false
}
if in.Policy != other.Policy {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointIdentifiers) DeepEqual(other *EndpointIdentifiers) bool {
if other == nil {
return false
}
if in.CniAttachmentID != other.CniAttachmentID {
return false
}
if in.ContainerID != other.ContainerID {
return false
}
if in.ContainerName != other.ContainerName {
return false
}
if in.DockerEndpointID != other.DockerEndpointID {
return false
}
if in.DockerNetworkID != other.DockerNetworkID {
return false
}
if in.K8sNamespace != other.K8sNamespace {
return false
}
if in.K8sPodName != other.K8sPodName {
return false
}
if in.PodName != other.PodName {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointStatusChange) DeepEqual(other *EndpointStatusChange) bool {
if other == nil {
return false
}
if in.Code != other.Code {
return false
}
if in.Message != other.Message {
return false
}
if in.State != other.State {
return false
}
if in.Timestamp != other.Timestamp {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *NamedPorts) DeepEqual(other *NamedPorts) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual((*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Port) DeepEqual(other *Port) bool {
if other == nil {
return false
}
if in.Name != other.Name {
return false
}
if in.Port != other.Port {
return false
}
if in.Protocol != other.Protocol {
return false
}
return true
}
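// exampleNamedPortsDeepEqual is an illustrative sketch added for this document
// (not part of the generated code): NamedPorts are compared element-wise, so
// two distinct slices with equal Port contents are DeepEqual.
func exampleNamedPortsDeepEqual() bool {
	a := NamedPorts{&Port{Name: "http", Port: 80, Protocol: "TCP"}}
	b := NamedPorts{&Port{Name: "http", Port: 80, Protocol: "TCP"}}
	return a.DeepEqual(&b) // true
}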
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package cidr
import (
"bytes"
"fmt"
"net"
"slices"
)
// NewCIDR returns a new CIDR using a net.IPNet
func NewCIDR(ipnet *net.IPNet) *CIDR {
if ipnet == nil {
return nil
}
return &CIDR{ipnet}
}
// NewCIDRSlice returns a slice of CIDRs for the given slice of net.IPNets.
func NewCIDRSlice(ipnets []*net.IPNet) []*CIDR {
if ipnets == nil {
return nil
}
cidrs := make([]*CIDR, len(ipnets))
for i, ipnet := range ipnets {
cidrs[i] = NewCIDR(ipnet)
}
return cidrs
}
// CIDRsToIPNets converts a slice of CIDRs into a slice of net.IPNets.
func CIDRsToIPNets(cidrs []*CIDR) []*net.IPNet {
if cidrs == nil {
return nil
}
ipnets := make([]*net.IPNet, len(cidrs))
for i, cidr := range cidrs {
ipnets[i] = cidr.IPNet
}
return ipnets
}
// CIDR is a network CIDR representation based on net.IPNet
type CIDR struct {
*net.IPNet
}
// DeepEqual is a deepequal function, deeply comparing the receiver with other.
// in must be non-nil.
func (in *CIDR) DeepEqual(other *CIDR) bool {
if other == nil {
return false
}
if (in.IPNet == nil) != (other.IPNet == nil) {
return false
} else if in.IPNet != nil {
if !in.IPNet.IP.Equal(other.IPNet.IP) {
return false
}
inOnes, inBits := in.IPNet.Mask.Size()
otherOnes, otherBits := other.IPNet.Mask.Size()
return inOnes == otherOnes && inBits == otherBits
}
return true
}
// DeepCopy creates a deep copy of a CIDR
func (n *CIDR) DeepCopy() *CIDR {
if n == nil {
return nil
}
out := new(CIDR)
n.DeepCopyInto(out)
return out
}
// DeepCopyInto is a deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CIDR) DeepCopyInto(out *CIDR) {
*out = *in
if in.IPNet == nil {
return
}
out.IPNet = new(net.IPNet)
*out.IPNet = *in.IPNet
if in.IPNet.IP != nil {
in, out := &in.IPNet.IP, &out.IPNet.IP
*out = make(net.IP, len(*in))
copy(*out, *in)
}
if in.IPNet.Mask != nil {
in, out := &in.IPNet.Mask, &out.IPNet.Mask
*out = make(net.IPMask, len(*in))
copy(*out, *in)
}
}
// AvailableIPs returns the number of IPs available in a CIDR
func (n *CIDR) AvailableIPs() int {
ones, bits := n.Mask.Size()
return 1 << (bits - ones)
}
// Equal returns true if the receiver's CIDR equals the other CIDR.
func (n *CIDR) Equal(o *CIDR) bool {
if n == nil || o == nil {
return n == o
}
return Equal(n.IPNet, o.IPNet)
}
// Equal returns true if the n and o net.IPNet CIDRs are equal.
func Equal(n, o *net.IPNet) bool {
if n == nil || o == nil {
return n == o
}
if n == o {
return true
}
return n.IP.Equal(o.IP) &&
bytes.Equal(n.Mask, o.Mask)
}
// ZeroNet generates a zero net.IPNet object for the given address family
func ZeroNet(family int) *net.IPNet {
switch family {
case FAMILY_V4:
return &net.IPNet{
IP: net.IPv4zero,
Mask: net.CIDRMask(0, 8*net.IPv4len),
}
case FAMILY_V6:
return &net.IPNet{
IP: net.IPv6zero,
Mask: net.CIDRMask(0, 8*net.IPv6len),
}
}
return nil
}
// ContainsAll returns true if 'ipNets1' contains all net.IPNet of 'ipNets2'
func ContainsAll(ipNets1, ipNets2 []*net.IPNet) bool {
for _, n2 := range ipNets2 {
if !slices.ContainsFunc(ipNets1, func(n1 *net.IPNet) bool {
return Equal(n2, n1)
}) {
return false
}
}
return true
}
// ParseCIDR parses the CIDR string using net.ParseCIDR
func ParseCIDR(str string) (*CIDR, error) {
_, ipnet, err := net.ParseCIDR(str)
if err != nil {
return nil, err
}
return NewCIDR(ipnet), nil
}
// MustParseCIDR parses the CIDR string using net.ParseCIDR and panics if the
// CIDR cannot be parsed
func MustParseCIDR(str string) *CIDR {
c, err := ParseCIDR(str)
if err != nil {
panic(fmt.Sprintf("Unable to parse CIDR '%s': %s", str, err))
}
return c
}
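// exampleCIDRUsage is an illustrative sketch added for this document (not part
// of the original source): it shows MustParseCIDR, AvailableIPs, Equal and
// ContainsAll from this package.
func exampleCIDRUsage() {
	c := MustParseCIDR("10.0.0.0/24")
	fmt.Println(c.AvailableIPs())                      // 256 (the full /24, including network and broadcast)
	fmt.Println(c.Equal(MustParseCIDR("10.0.0.0/24"))) // true
	fmt.Println(ContainsAll([]*net.IPNet{c.IPNet}, []*net.IPNet{c.IPNet})) // true
}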
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package cidr
func createIPNetMap(list []*CIDR) map[string]*CIDR {
m := map[string]*CIDR{}
for _, c := range list {
if c != nil {
m[c.String()] = c
}
}
return m
}
func listMissingIPNets(existing map[string]*CIDR, new []*CIDR) (missing []*CIDR) {
for _, c := range new {
if c != nil {
if _, ok := existing[c.String()]; !ok {
missing = append(missing, c)
}
}
}
return
}
// DiffCIDRLists compares an old and new list of CIDRs and returns the list of
// removed and added CIDRs
func DiffCIDRLists(old, new []*CIDR) (add, remove []*CIDR) {
add = listMissingIPNets(createIPNetMap(old), new)
remove = listMissingIPNets(createIPNetMap(new), old)
return
}
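// exampleDiffCIDRLists is an illustrative sketch added for this document (not
// part of the original source): DiffCIDRLists reports which prefixes must be
// added and which must be removed to turn the old list into the new one.
func exampleDiffCIDRLists() (add, remove []*CIDR) {
	oldList := []*CIDR{MustParseCIDR("10.0.0.0/24"), MustParseCIDR("10.0.1.0/24")}
	newList := []*CIDR{MustParseCIDR("10.0.1.0/24"), MustParseCIDR("10.0.2.0/24")}
	// add contains 10.0.2.0/24 and remove contains 10.0.0.0/24.
	return DiffCIDRLists(oldList, newList)
}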
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"bytes"
"errors"
"fmt"
"net"
"net/netip"
"strconv"
"strings"
"go4.org/netipx"
"github.com/cilium/cilium/pkg/cidr"
)
//
// This file defines types and utilities for cluster-aware addressing,
// which identifies network endpoints using an IP address and an optional
// ClusterID. With this addressing scheme, we can distinguish network
// endpoints (e.g. Pods) that have the same IP address but belong to
// different clusters.
//
// A "bare" IP address is still a valid identifier because there are
// cases where endpoints can be identified without a ClusterID (e.g. the
// network endpoint has a unique IP address). This is treated as a special
// case in which the ClusterID "doesn't matter"; ClusterID 0 is reserved
// to indicate it.
//
// AddrCluster is a type that holds a pair of IP and ClusterID. Use this
// type whenever implementing IP + Cluster addressing, and avoid managing
// the IP and ClusterID separately. Otherwise, it is very hard for code
// readers to see where cluster-aware addressing is in use.
type AddrCluster struct {
addr netip.Addr
clusterID uint32
}
const AddrClusterLen = 20
var (
errUnmarshalBadAddress = errors.New("AddrCluster.UnmarshalJSON: bad address")
errMarshalInvalidAddress = errors.New("AddrCluster.MarshalJSON: invalid address")
jsonZeroAddress = []byte("\"\"")
)
// MarshalJSON marshals the address as a string in the form
// <addr>@<clusterID>, e.g. "1.2.3.4@1"
func (a *AddrCluster) MarshalJSON() ([]byte, error) {
if !a.addr.IsValid() {
if a.clusterID != 0 {
return nil, errMarshalInvalidAddress
}
// AddrCluster{} is the zero value. Preserve this across the
// marshalling by returning an empty string.
return jsonZeroAddress, nil
}
var b bytes.Buffer
b.WriteByte('"')
b.WriteString(a.String())
b.WriteByte('"')
return b.Bytes(), nil
}
func (a *AddrCluster) UnmarshalJSON(data []byte) error {
if bytes.Equal(data, jsonZeroAddress) {
return nil
}
if len(data) <= 2 || data[0] != '"' || data[len(data)-1] != '"' {
return errUnmarshalBadAddress
}
// Drop the surrounding quotes
data = data[1 : len(data)-1]
a2, err := ParseAddrCluster(string(data))
if err != nil {
return err
}
a.addr = a2.addr
a.clusterID = a2.clusterID
return nil
}
// ParseAddrCluster parses s as an IP + ClusterID and returns an AddrCluster.
// The string s can be a bare IP string (any IP address format accepted by
// netip.ParseAddr()) or an IP string + "@" + a decimal ClusterID. A bare IP
// string is treated as having ClusterID = 0.
func ParseAddrCluster(s string) (AddrCluster, error) {
atIndex := strings.LastIndex(s, "@")
var (
addrStr string
clusterIDStr string
)
if atIndex == -1 {
// s may be a bare IP address string, still valid
addrStr = s
clusterIDStr = ""
} else {
// s may be a IP + ClusterID string
addrStr = s[:atIndex]
clusterIDStr = s[atIndex+1:]
}
addr, err := netip.ParseAddr(addrStr)
if err != nil {
return AddrCluster{}, err
}
if clusterIDStr == "" {
if atIndex != len(s)-1 {
return AddrCluster{addr: addr, clusterID: 0}, nil
} else {
// handle the invalid case like "10.0.0.0@"
return AddrCluster{}, fmt.Errorf("empty cluster ID")
}
}
clusterID64, err := strconv.ParseUint(clusterIDStr, 10, 32)
if err != nil {
return AddrCluster{}, err
}
return AddrCluster{addr: addr, clusterID: uint32(clusterID64)}, nil
}
// MustParseAddrCluster calls ParseAddrCluster(s) and panics on error. It is
// intended for use in tests with hard-coded strings.
func MustParseAddrCluster(s string) AddrCluster {
addrCluster, err := ParseAddrCluster(s)
if err != nil {
panic(err)
}
return addrCluster
}
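// exampleAddrCluster is an illustrative sketch added for this document (not
// part of the original source): two endpoints with the same IP address in
// different clusters are distinct AddrClusters, while a bare IP string parses
// with ClusterID 0.
func exampleAddrCluster() {
	a := MustParseAddrCluster("10.0.0.1")   // bare IP, ClusterID 0
	b := MustParseAddrCluster("10.0.0.1@1") // same IP, cluster 1
	c := MustParseAddrCluster("10.0.0.1@2") // same IP, cluster 2
	fmt.Println(a.Equal(b), b.Equal(c)) // false false
	fmt.Println(b.String())             // 10.0.0.1@1
}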
// AddrClusterFromIP parses the given net.IP using netipx.FromStdIP and returns
// AddrCluster with ClusterID = 0.
func AddrClusterFromIP(ip net.IP) (AddrCluster, bool) {
addr, ok := netipx.FromStdIP(ip)
if !ok {
return AddrCluster{}, false
}
return AddrCluster{addr: addr, clusterID: 0}, true
}
// MustAddrClusterFromIP converts the given net.IP to an AddrCluster with
// ClusterID = 0 and panics if the conversion fails.
func MustAddrClusterFromIP(ip net.IP) AddrCluster {
addr, ok := AddrClusterFromIP(ip)
if !ok {
panic("cannot convert net.IP to AddrCluster")
}
return addr
}
// AddrClusterFrom creates AddrCluster from netip.Addr and ClusterID
func AddrClusterFrom(addr netip.Addr, clusterID uint32) AddrCluster {
return AddrCluster{addr: addr, clusterID: clusterID}
}
// Addr returns the IP address part of AddrCluster as a netip.Addr. This
// function exists to keep backward compatibility with existing components
// that are not aware of cluster-aware addressing. Calling it on an
// AddrCluster with a non-zero ClusterID loses the ClusterID information, so
// it should be used with extra care.
func (ac AddrCluster) Addr() netip.Addr {
return ac.addr
}
// ClusterID returns ClusterID part of AddrCluster as uint32. We should avoid
// using this function as much as possible and treat IP address and ClusterID
// together.
func (ac AddrCluster) ClusterID() uint32 {
return ac.clusterID
}
// Equal returns true when given AddrCluster has a same IP address and ClusterID
func (ac0 AddrCluster) Equal(ac1 AddrCluster) bool {
return ac0.addr == ac1.addr && ac0.clusterID == ac1.clusterID
}
// Less compares ac0 and ac1 and returns true if ac0 is less than ac1
func (ac0 AddrCluster) Less(ac1 AddrCluster) bool {
// First, compare the IP address part
if ret := ac0.addr.Compare(ac1.addr); ret == -1 {
return true
} else if ret == 1 {
return false
} else {
// If IP address is the same, compare ClusterID
return ac0.clusterID < ac1.clusterID
}
}
// DeepEqual is an alias of Equal which only exists to satisfy deepequal-gen
func (ac0 *AddrCluster) DeepEqual(ac1 *AddrCluster) bool {
return ac0.Equal(*ac1)
}
// DeepCopyInto copies in to out
func (in *AddrCluster) DeepCopyInto(out *AddrCluster) {
if out == nil {
return
}
out.addr = in.addr
out.clusterID = in.clusterID
}
// DeepCopy returns a new copy of AddrCluster
func (in *AddrCluster) DeepCopy() *AddrCluster {
out := new(AddrCluster)
in.DeepCopyInto(out)
return out
}
// String returns the string representation of the AddrCluster. If
// AddrCluster.clusterID == 0, it returns the bare IP address string.
// Otherwise, it returns the IP string + "@" + ClusterID (e.g. 10.0.0.1@1).
func (ac AddrCluster) String() string {
if ac.clusterID == 0 {
return ac.addr.String()
}
return ac.addr.String() + "@" + strconv.FormatUint(uint64(ac.clusterID), 10)
}
// Is4 reports whether IP address part of AddrCluster is an IPv4 address.
func (ac AddrCluster) Is4() bool {
return ac.addr.Is4()
}
// Is6 reports whether IP address part of AddrCluster is an IPv6 address.
func (ac AddrCluster) Is6() bool {
return ac.addr.Is6()
}
// IsUnspecified reports whether IP address part of the AddrCluster is an
// unspecified address, either the IPv4 address "0.0.0.0" or the IPv6
// address "::".
func (ac AddrCluster) IsUnspecified() bool {
return ac.addr.IsUnspecified()
}
// As20 returns the AddrCluster in its 20-byte representation which consists
// of 16-byte IP address part from netip.Addr.As16 and 4-byte ClusterID part.
func (ac AddrCluster) As20() (ac20 [20]byte) {
addr16 := ac.addr.As16()
copy(ac20[:16], addr16[:])
ac20[16] = byte(ac.clusterID >> 24)
ac20[17] = byte(ac.clusterID >> 16)
ac20[18] = byte(ac.clusterID >> 8)
ac20[19] = byte(ac.clusterID)
return ac20
}
// AsNetIP returns the IP address part of AddrCluster as a net.IP type. This
// function exists to keep backward compatibility with existing components
// that are not aware of cluster-aware addressing. Calling it on an
// AddrCluster with a non-zero ClusterID loses the ClusterID information, so
// it should be used with extra care.
func (ac AddrCluster) AsNetIP() net.IP {
return ac.addr.AsSlice()
}
func (ac AddrCluster) AsPrefixCluster() PrefixCluster {
return PrefixClusterFrom(ac.addr, ac.addr.BitLen(), WithClusterID(ac.clusterID))
}
// PrefixCluster is a type that holds a pair of prefix and ClusterID.
// We should use this type as much as possible when we implement
// prefix + Cluster addressing. We should avoid managing prefix and
// ClusterID separately. Otherwise, it is very hard for code readers
// to see where we are using cluster-aware addressing.
type PrefixCluster struct {
prefix netip.Prefix
clusterID uint32
}
// ParsePrefixCluster parses s as a Prefix + ClusterID and returns a PrefixCluster.
// The string s can be a bare IP prefix string (any prefix format accepted by
// netip.ParsePrefix()) or a prefix string + "@" + a decimal ClusterID. A bare
// prefix string is treated as having ClusterID = 0.
func ParsePrefixCluster(s string) (PrefixCluster, error) {
atIndex := strings.LastIndex(s, "@")
var (
prefixStr string
clusterIDStr string
)
if atIndex == -1 {
// s may be a bare IP prefix string, still valid
prefixStr = s
clusterIDStr = ""
} else {
// s may be a prefix + ClusterID string
prefixStr = s[:atIndex]
clusterIDStr = s[atIndex+1:]
}
prefix, err := netip.ParsePrefix(prefixStr)
if err != nil {
return PrefixCluster{}, err
}
if clusterIDStr == "" {
if atIndex != len(s)-1 {
return PrefixCluster{prefix: prefix, clusterID: 0}, nil
} else {
// handle the invalid case like "10.0.0.0/24@"
return PrefixCluster{}, fmt.Errorf("empty cluster ID")
}
}
clusterID64, err := strconv.ParseUint(clusterIDStr, 10, 32)
if err != nil {
return PrefixCluster{}, err
}
return PrefixCluster{prefix: prefix, clusterID: uint32(clusterID64)}, nil
}
// MustParsePrefixCluster calls ParsePrefixCluster(s) and panics on error.
// It is intended for use in tests with hard-coded strings.
func MustParsePrefixCluster(s string) PrefixCluster {
prefixCluster, err := ParsePrefixCluster(s)
if err != nil {
panic(err)
}
return prefixCluster
}
func (pc PrefixCluster) IsSingleIP() bool {
return pc.prefix.IsSingleIP()
}
// PrefixClusterOpts is an option to customize a PrefixCluster, e.g. to set its ClusterID.
type PrefixClusterOpts func(*PrefixCluster)
// WithClusterID returns a PrefixClusterOpts that sets the ClusterID of a PrefixCluster.
func WithClusterID(id uint32) PrefixClusterOpts {
return func(pc *PrefixCluster) { pc.clusterID = id }
}
// PrefixClusterFrom creates a PrefixCluster from the given netip.Addr and prefix length, applying any options.
func PrefixClusterFrom(addr netip.Addr, bits int, opts ...PrefixClusterOpts) PrefixCluster {
pc := PrefixCluster{prefix: netip.PrefixFrom(addr, bits)}
for _, opt := range opts {
opt(&pc)
}
return pc
}
// PrefixClusterFromCIDR creates a PrefixCluster from the given cidr.CIDR, applying any options.
func PrefixClusterFromCIDR(c *cidr.CIDR, opts ...PrefixClusterOpts) PrefixCluster {
if c == nil {
return PrefixCluster{}
}
addr, ok := netipx.FromStdIP(c.IP)
if !ok {
return PrefixCluster{}
}
ones, _ := c.Mask.Size()
return PrefixClusterFrom(addr, ones, opts...)
}
func (pc0 PrefixCluster) Equal(pc1 PrefixCluster) bool {
return pc0.prefix == pc1.prefix && pc0.clusterID == pc1.clusterID
}
func (pc PrefixCluster) IsValid() bool {
return pc.prefix.IsValid()
}
func (pc PrefixCluster) AddrCluster() AddrCluster {
return AddrClusterFrom(pc.prefix.Addr(), pc.clusterID)
}
func (pc PrefixCluster) ClusterID() uint32 {
return pc.clusterID
}
func (pc PrefixCluster) String() string {
if pc.clusterID == 0 {
return pc.prefix.String()
}
return pc.prefix.String() + "@" + strconv.FormatUint(uint64(pc.clusterID), 10)
}
// AsPrefix returns the IP prefix part of PrefixCluster as a netip.Prefix type.
// This function exists to keep backward compatibility with existing components
// that are not aware of cluster-aware addressing. Calling it on a PrefixCluster
// with a non-zero ClusterID loses the ClusterID information, so it should be
// used with extra care.
func (pc PrefixCluster) AsPrefix() netip.Prefix {
return netip.PrefixFrom(pc.prefix.Addr(), pc.prefix.Bits())
}
// AsIPNet returns the IP prefix part of PrefixCluster as a net.IPNet type. This
// function exists to keep backward compatibility with existing components that
// are not aware of cluster-aware addressing. Calling it on a PrefixCluster with
// a non-zero ClusterID loses the ClusterID information, so it should be used
// with extra care.
func (pc PrefixCluster) AsIPNet() net.IPNet {
return *netipx.PrefixIPNet(pc.AsPrefix())
}
// AnnotateIPCacheKeyWithClusterID exists solely for annotating IPCache's key
// string with a ClusterID. IPCache's key string is an IP address or prefix
// string (10.0.0.1 and 10.0.0.0/32 are different entries). This function
// assumes the given string is in one of those formats and just appends the
// @<ClusterID> suffix; there is no format check, for performance reasons. The
// caller must make sure the input is a valid IP or prefix string.
//
// We should eventually remove this function once we finish refactoring IPCache
// and stop using strings as keys. At that point, we should consider using the
// PrefixCluster type for IPCache's key.
func AnnotateIPCacheKeyWithClusterID(key string, clusterID uint32) string {
return key + "@" + strconv.FormatUint(uint64(clusterID), 10)
}
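// examplePrefixCluster is an illustrative sketch added for this document (not
// part of the original source): it shows the prefix@clusterID round trip and
// the string annotation helper used for IPCache keys.
func examplePrefixCluster() {
	pc := MustParsePrefixCluster("10.0.0.0/24@3")
	fmt.Println(pc.String(), pc.ClusterID())                       // 10.0.0.0/24@3 3
	fmt.Println(AnnotateIPCacheKeyWithClusterID("10.0.0.0/24", 3)) // 10.0.0.0/24@3
}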
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"errors"
"fmt"
"github.com/spf13/pflag"
"github.com/cilium/cilium/pkg/defaults"
ipamOption "github.com/cilium/cilium/pkg/ipam/option"
)
const (
// OptClusterName is the name of the OptClusterName option
OptClusterName = "cluster-name"
// OptClusterID is the name of the OptClusterID option
OptClusterID = "cluster-id"
// OptMaxConnectedClusters is the name of the OptMaxConnectedClusters option
OptMaxConnectedClusters = "max-connected-clusters"
)
// ClusterInfo groups together the ClusterID and the ClusterName
type ClusterInfo struct {
ID uint32 `mapstructure:"cluster-id"`
Name string `mapstructure:"cluster-name"`
MaxConnectedClusters uint32 `mapstructure:"max-connected-clusters"`
}
// DefaultClusterInfo represents the default ClusterInfo values.
var DefaultClusterInfo = ClusterInfo{
ID: 0,
Name: defaults.ClusterName,
MaxConnectedClusters: defaults.MaxConnectedClusters,
}
// Flags implements the cell.Flagger interface, to register the given flags.
func (def ClusterInfo) Flags(flags *pflag.FlagSet) {
flags.Uint32(OptClusterID, def.ID, "Unique identifier of the cluster")
flags.String(OptClusterName, def.Name, "Name of the cluster. It must consist of at most 32 lower case alphanumeric characters and '-', start and end with an alphanumeric character.")
flags.Uint32(OptMaxConnectedClusters, def.MaxConnectedClusters, "Maximum number of clusters to be connected in a clustermesh. Increasing this value will reduce the maximum number of identities available. Valid configurations are [255, 511].")
}
// Validate validates that the ClusterID is in the valid range (including ClusterID == 0),
// and that the ClusterName is different from the default value if the ClusterID != 0.
func (c ClusterInfo) Validate() error {
if c.ID < ClusterIDMin || c.ID > ClusterIDMax {
return fmt.Errorf("invalid cluster id %d: must be in range %d..%d",
c.ID, ClusterIDMin, ClusterIDMax)
}
return c.validateName()
}
// ValidateStrict validates that the ClusterID is in the valid range, but not 0,
// and that the ClusterName is different from the default value.
func (c ClusterInfo) ValidateStrict() error {
if err := ValidateClusterID(c.ID); err != nil {
return err
}
return c.validateName()
}
// ValidateBuggyClusterID returns an error if a buggy cluster ID (i.e., with the
// 7th bit set) is used in combination with ENI IPAM mode or AWS CNI chaining.
func (c ClusterInfo) ValidateBuggyClusterID(ipamMode, chainingMode string) error {
if (c.ID&0x80) != 0 && (ipamMode == ipamOption.IPAMENI || ipamMode == ipamOption.IPAMAlibabaCloud || chainingMode == "aws-cni") {
return errors.New("Cilium is currently affected by a bug that causes traffic matched " +
"by network policies to be incorrectly dropped when running in either ENI mode (both " +
"AWS and AlibabaCloud) or AWS VPC CNI chaining mode, if the cluster ID is 128-255 (and " +
"384-511 when max-connected-clusters=511). " +
"Please refer to https://github.com/cilium/cilium/issues/21330 for additional details.")
}
return nil
}
func (c ClusterInfo) validateName() error {
if err := ValidateClusterName(c.Name); err != nil {
return fmt.Errorf("invalid cluster name: %w", err)
}
if c.ID != 0 && c.Name == defaults.ClusterName {
return fmt.Errorf("cannot use default cluster name (%s) with option %s",
defaults.ClusterName, OptClusterID)
}
return nil
}
// ExtendedClusterMeshEnabled returns true if MaxConnectedClusters value has
// been set to a value larger than the default 255.
func (c ClusterInfo) ExtendedClusterMeshEnabled() bool {
return c.MaxConnectedClusters != defaults.MaxConnectedClusters
}
// ValidateRemoteConfig validates the remote CiliumClusterConfig to ensure
// compatibility with this cluster's configuration.
func (c ClusterInfo) ValidateRemoteConfig(config CiliumClusterConfig) error {
if err := ValidateClusterID(config.ID); err != nil {
return err
}
if c.ExtendedClusterMeshEnabled() && (c.MaxConnectedClusters != config.Capabilities.MaxConnectedClusters) {
return fmt.Errorf("mismatched MaxConnectedClusters; local=%d, remote=%d", c.MaxConnectedClusters, config.Capabilities.MaxConnectedClusters)
}
return nil
}
// QuirksConfig allows the user to configure how Cilium behaves when a set
// of incompatible options is configured together in the agent.
type QuirksConfig struct {
// AllowUnsafePolicySKBUsage determines whether the agent may continue
// startup despite detecting a configuration combination that may cause
// connection impact in the dataplane, due to clustermesh IDs conflicting
// with other usage of the skb->mark field. See GH-21330.
AllowUnsafePolicySKBUsage bool
}
var DefaultQuirks = QuirksConfig{
AllowUnsafePolicySKBUsage: false,
}
// Flags implements the cell.Flagger interface, to register the given flags.
func (_ QuirksConfig) Flags(flags *pflag.FlagSet) {
flags.Bool("allow-unsafe-policy-skb-usage", false,
"Allow the daemon to continue to operate even if conflicting "+
"clustermesh ID configuration is detected which may "+
"impact the ability for Cilium to enforce network "+
"policy both within and across clusters")
flags.MarkHidden("allow-unsafe-policy-skb-usage")
}
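// exampleClusterInfoValidation is an illustrative sketch added for this
// document (not part of the original source): it contrasts Validate and
// ValidateStrict for the default (unset) cluster configuration.
func exampleClusterInfoValidation() {
	c := ClusterInfo{ID: 0, Name: defaults.ClusterName, MaxConnectedClusters: defaults.MaxConnectedClusters}
	fmt.Println(c.Validate() == nil)       // true: ClusterID 0 is allowed by Validate
	fmt.Println(c.ValidateStrict() == nil) // false: ClusterID 0 is rejected by ValidateStrict
}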
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package types
import (
"errors"
"fmt"
"regexp"
"github.com/cilium/cilium/pkg/defaults"
)
const (
// ClusterIDMin is the minimum value of the cluster ID
ClusterIDMin = 0
ClusterIDExt511 = 511
ClusterIDUnset = ClusterIDMin
)
// ClusterIDMax is the maximum value of the cluster ID
var ClusterIDMax uint32 = defaults.MaxConnectedClusters
// A cluster name must respect the following constraints:
// * It must contain at most 32 characters;
// * It must begin and end with a lower case alphanumeric character;
// * It may contain lower case alphanumeric characters and dashes between.
const (
// clusterNameMaxLength is the maximum allowed length of a cluster name.
clusterNameMaxLength = 32
// clusterNameRegexStr is the regex to validate a cluster name.
clusterNameRegexStr = `^([a-z0-9][-a-z0-9]*)?[a-z0-9]$`
)
var clusterNameRegex = regexp.MustCompile(clusterNameRegexStr)
// InitClusterIDMax validates and sets the ClusterIDMax package level variable.
func (c ClusterInfo) InitClusterIDMax() error {
switch c.MaxConnectedClusters {
case defaults.MaxConnectedClusters, ClusterIDExt511:
ClusterIDMax = c.MaxConnectedClusters
default:
return fmt.Errorf("--%s=%d is invalid; supported values are [%d, %d]", OptMaxConnectedClusters, c.MaxConnectedClusters, defaults.MaxConnectedClusters, ClusterIDExt511)
}
return nil
}
// ValidateClusterID ensures that the given clusterID is within the configured
// range of the ClusterMesh.
func ValidateClusterID(clusterID uint32) error {
if clusterID == ClusterIDMin {
return fmt.Errorf("ClusterID %d is reserved", ClusterIDMin)
}
if clusterID > ClusterIDMax {
return fmt.Errorf("ClusterID > %d is not supported", ClusterIDMax)
}
return nil
}
// ValidateClusterName validates that the given name matches the cluster name specifications.
func ValidateClusterName(name string) error {
if name == "" {
return errors.New("must not be empty")
}
if len(name) > clusterNameMaxLength {
return fmt.Errorf("must not be more than %d characters", clusterNameMaxLength)
}
if !clusterNameRegex.MatchString(name) {
return errors.New("must consist of lower case alphanumeric characters and '-', and must start and end with an alphanumeric character")
}
return nil
}
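// exampleClusterNameValidation is an illustrative sketch added for this
// document (not part of the original source): cluster names must be lower case
// alphanumerics and '-', starting and ending with an alphanumeric character,
// and ClusterID 0 is reserved.
func exampleClusterNameValidation() {
	fmt.Println(ValidateClusterName("cluster-1") == nil) // true
	fmt.Println(ValidateClusterName("-cluster") == nil)  // false: must start with an alphanumeric
	fmt.Println(ValidateClusterID(0) == nil)             // false: ClusterID 0 is reserved
}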
// CiliumClusterConfig is the cluster configuration advertised to remote clusters.
type CiliumClusterConfig struct {
ID uint32 `json:"id,omitempty"`
Capabilities CiliumClusterConfigCapabilities `json:"capabilities,omitempty"`
}
type CiliumClusterConfigCapabilities struct {
// Supports per-prefix "synced" canaries
SyncedCanaries bool `json:"syncedCanaries,omitempty"`
// The information concerning the given cluster is cached from an external
// kvstore (for instance, by kvstoremesh). This implies that keys are stored
// under the dedicated "cilium/cache" prefix, and all are cluster-scoped.
Cached bool `json:"cached,omitempty"`
// The maximum number of clusters the given cluster can support in a ClusterMesh.
MaxConnectedClusters uint32 `json:"maxConnectedClusters,omitempty"`
// Whether or not MCS-API ServiceExports is enabled by the cluster.
// Additionally, a nil value means that it's not supported.
ServiceExportsEnabled *bool `json:"serviceExportsEnabled,omitempty"`
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package command
import (
"encoding/json"
"errors"
"fmt"
"regexp"
"strings"
"unicode"
"github.com/spf13/cast"
"github.com/spf13/viper"
)
const (
comma = ','
equal = '='
)
var keyValueRegex = regexp.MustCompile(`([\w-:;./@]+=([\w-:;,./@][\w-:;,./@ ]*[\w-:;,./@])?[\w-:;,./@]*,)*([\w-:;./@]+=([\w-:;,./@][\w-:;,./@ ]*)?[\w-:;./@]+)$`)
// GetStringMapString contains one enhancement compared to the original viper
// GetStringMapString implementation: it also supports the k1=v1,k2=v2 format.
// Related upstream issue: https://github.com/spf13/viper/issues/911
func GetStringMapString(vp *viper.Viper, key string) map[string]string {
v, _ := GetStringMapStringE(vp, key)
return v
}
// GetStringMapStringE is the same as GetStringMapString, but also returns an error.
func GetStringMapStringE(vp *viper.Viper, key string) (map[string]string, error) {
return ToStringMapStringE(vp.Get(key))
}
// ToStringMapStringE casts an interface to a map[string]string type. The underlying
// interface type might be a map or a string. In the latter case, an attempt is made
// to JSON-decode it, falling back to the k1=v1,k2=v2 format if it doesn't look like JSON.
func ToStringMapStringE(data interface{}) (map[string]string, error) {
if data == nil {
return map[string]string{}, nil
}
v, err := cast.ToStringMapStringE(data)
if err != nil {
var syntaxErr *json.SyntaxError
if !errors.As(err, &syntaxErr) {
return v, err
}
switch s := data.(type) {
case string:
if len(s) == 0 {
return map[string]string{}, nil
}
// if the input starts with either '{' or '[', just preserve the original json parsing error.
firstIndex := strings.IndexFunc(s, func(r rune) bool {
return !unicode.IsSpace(r)
})
if firstIndex != -1 && (s[firstIndex] == '{' || s[firstIndex] == '[') {
return v, err
}
if !isValidKeyValuePair(s) {
return map[string]string{}, fmt.Errorf("'%s' is not formatted as key=value,key1=value1", s)
}
var v = map[string]string{}
kvs := splitKeyValue(s, comma, equal)
for _, kv := range kvs {
temp := strings.Split(kv, string(equal))
if len(temp) != 2 {
return map[string]string{}, fmt.Errorf("'%s' in '%s' is not formatted as key=value,key1=value1", kv, s)
}
v[temp[0]] = temp[1]
}
return v, nil
}
}
return v, nil
}
// isValidKeyValuePair returns true if the input follows the key1=value1,key2=value2,...,keyN=valueN format.
func isValidKeyValuePair(str string) bool {
if len(str) == 0 {
return true
}
return len(keyValueRegex.ReplaceAllString(str, "")) == 0
}
// splitKeyValue is similar to strings.Split, but looks ahead so that the sep
// character is also allowed inside the value component of a key-value pair.
//
// Example: with the input "c6a.2xlarge=4,15,15,m4.xlarge=2,4,8",
// - strings.Split function will return []string{"c6a.2xlarge=4", "15", "15", "m4.xlarge=2", "4", "8"}.
// - splitKeyValue function will return []string{"c6a.2xlarge=4,15,15", "m4.xlarge=2,4,8"} instead.
func splitKeyValue(str string, sep rune, keyValueSep rune) []string {
var sepIndexes, kvValueSepIndexes []int
// find all indexes of separator character
for i := 0; i < len(str); i++ {
switch int32(str[i]) {
case sep:
sepIndexes = append(sepIndexes, i)
case keyValueSep:
kvValueSepIndexes = append(kvValueSepIndexes, i)
}
}
// there's only a single key-value if there are no separators ("key=value")
// or a single key-value separator ("key=option1:value1,option2:value2")
if len(sepIndexes) == 0 || len(kvValueSepIndexes) == 1 {
return []string{str}
}
if len(sepIndexes) == 1 {
index := sepIndexes[0]
return []string{str[:index], str[index+1:]}
}
var res []string
var start = 0
for i := 0; i < len(sepIndexes); i++ {
last := len(str)
if i < len(sepIndexes)-1 {
last = sepIndexes[i+1]
}
if strings.ContainsRune(str[sepIndexes[i]:last], keyValueSep) {
res = append(res, str[start:sepIndexes[i]])
start = sepIndexes[i] + 1
}
}
// append the remainder after the last sep index
res = append(res, str[start:])
return res
}
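// exampleToStringMapString is an illustrative sketch added for this document
// (not part of the original source): thanks to splitKeyValue, values may
// themselves contain commas, as in instance-type limit maps.
func exampleToStringMapString() {
	m, err := ToStringMapStringE("c6a.2xlarge=4,15,15,m4.xlarge=2,4,8")
	fmt.Println(err, m["c6a.2xlarge"], m["m4.xlarge"]) // <nil> 4,15,15 2,4,8
}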
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package command
import (
"encoding/json"
"fmt"
"os"
"regexp"
"strings"
"github.com/spf13/cobra"
"gopkg.in/yaml.v3"
"k8s.io/client-go/util/jsonpath"
)
var (
outputOpt string
re = regexp.MustCompile(`^jsonpath\=(.*)`)
)
// OutputOption returns true if an output option was specified.
func OutputOption() bool {
return len(outputOpt) > 0
}
// OutputOptionString returns the output option as a string
func OutputOptionString() string {
if outputOpt == "yaml" {
return "YAML"
}
if outputOpt == "json" || re.MatchString(outputOpt) {
return "JSON"
}
return "unknown"
}
// AddOutputOption adds the -o|--output option to any cmd to export to json, yaml, or jsonpath.
func AddOutputOption(cmd *cobra.Command) {
cmd.Flags().StringVarP(&outputOpt, "output", "o", "", "json | yaml | jsonpath='{}'")
}
// ForceJSON sets output mode to JSON (for unit tests)
func ForceJSON() {
outputOpt = "json"
}
// PrintOutput receives an interface and dumps the data using the --output flag.
// Supported formats are json, yaml, and jsonpath.
func PrintOutput(data interface{}) error {
return PrintOutputWithType(data, outputOpt)
}
// PrintOutputWithPatch merges data with patch and dumps the result using the --output flag.
func PrintOutputWithPatch(data interface{}, patch interface{}) error {
mergedInterface, err := mergeInterfaces(data, patch)
if err != nil {
return fmt.Errorf("Unable to merge Interfaces: %w", err)
}
return PrintOutputWithType(mergedInterface, outputOpt)
}
func mergeInterfaces(data, patch interface{}) (interface{}, error) {
var i1, i2 interface{}
data1, err := json.Marshal(data)
if err != nil {
return nil, err
}
data2, err := json.Marshal(patch)
if err != nil {
return nil, err
}
err = json.Unmarshal(data1, &i1)
if err != nil {
return nil, err
}
err = json.Unmarshal(data2, &i2)
if err != nil {
return nil, err
}
return recursiveMerge(i1, i2), nil
}
func recursiveMerge(i1, i2 interface{}) interface{} {
switch i1 := i1.(type) {
case map[string]interface{}:
i2, ok := i2.(map[string]interface{})
if !ok {
return i1
}
for k, v2 := range i2 {
if v1, ok := i1[k]; ok {
i1[k] = recursiveMerge(v1, v2)
} else {
i1[k] = v2
}
}
case nil:
i2, ok := i2.(map[string]interface{})
if ok {
return i2
}
}
return i1
}
// PrintOutputWithType receives an interface and dumps the data using the given
// output type. Supported types are json, yaml, and jsonpath.
func PrintOutputWithType(data interface{}, outputType string) error {
if outputType == "json" {
return dumpJSON(data, "")
}
if outputType == "yaml" {
return dumpYAML(data)
}
if re.MatchString(outputType) {
return dumpJSON(data, re.ReplaceAllString(outputType, "$1"))
}
return fmt.Errorf("couldn't find output printer")
}
// DumpJSONToString dumps the contents of data into a string. If jsonPath is
// non-empty, it attempts jsonpath filtering using that expression. Returns a
// string containing the JSON representation of data, or an error if any JSON
// marshaling or parsing operation fails.
func DumpJSONToString(data interface{}, jsonPath string) (string, error) {
if len(jsonPath) == 0 {
result, err := json.MarshalIndent(data, "", " ")
if err != nil {
fmt.Fprintf(os.Stderr, "Couldn't marshal to json: '%s'\n", err)
return "", err
}
return string(result), nil
}
parser := jsonpath.New("").AllowMissingKeys(true)
if err := parser.Parse(jsonPath); err != nil {
fmt.Fprintf(os.Stderr, "Couldn't parse jsonpath expression: '%s'\n", err)
return "", err
}
var sb strings.Builder
if err := parser.Execute(&sb, data); err != nil {
fmt.Fprintf(os.Stderr, "Couldn't parse jsonpath expression: '%s'\n", err)
return "", err
}
return sb.String(), nil
}
// dumpJSON dumps the data variable to stdout as json.
// If jsonPath is passed, it runs the jsonpath query over the data variable.
// If something fails, it returns an error.
func dumpJSON(data interface{}, jsonPath string) error {
jsonStr, err := DumpJSONToString(data, jsonPath)
if err != nil {
return err
}
fmt.Println(jsonStr)
return nil
}
// dumpYAML dumps the data variable to stdout as yaml.
// If something fails, it returns an error.
func dumpYAML(data interface{}) error {
result, err := yaml.Marshal(data)
if err != nil {
fmt.Fprintf(os.Stderr, "Couldn't marshal to yaml: '%s'\n", err)
return err
}
fmt.Println(string(result))
return nil
}
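// exampleJSONPathOutput is an illustrative sketch added for this document (not
// part of the original source): DumpJSONToString can filter the marshalled
// data with a Kubernetes-style jsonpath expression.
func exampleJSONPathOutput() {
	data := map[string]interface{}{"name": "cilium", "healthy": true}
	out, err := DumpJSONToString(data, "{.name}")
	fmt.Println(err, out) // <nil> cilium
}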
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package cache
import (
"sync"
)
const (
cacheSize = 512
cacheMask = cacheSize - 1
)
// New creates a Cache that uses hashfn to hash objects, eqfn to compare them,
// and an optional skipfn to bypass caching for certain values.
func New[T any](hashfn func(T) uint64, skipfn func(x T) bool, eqfn func(a, b T) bool) *Cache[T] {
return &Cache[T]{
hashfn: hashfn,
eqfn: eqfn,
skipfn: skipfn,
pool: sync.Pool{New: func() any {
var arr [cacheSize]T
return &arr
}},
}
}
// Cache is a simple fixed size cache for efficient deduplication of objects.
type Cache[T any] struct {
// pool of cache arrays. Pool is used here as it provides very efficient
// shared access to a set of "cache arrays", and under low memory scenarios
// allows the Go runtime to drop the caches.
pool sync.Pool
skipfn func(T) bool
hashfn func(T) uint64
eqfn func(a, b T) bool
}
// Get returns a cached object if any. If Get() was called previously with an
// object equal to [x] and it is still found in the cache, that object is
// returned; otherwise [x] is inserted into the cache and returned.
func (c *Cache[T]) Get(x T) T {
if c.skipfn != nil && c.skipfn(x) {
return x
}
x, _ = c.get(x)
return x
}
func (c *Cache[T]) get(x T) (T, uint64) {
hash := c.hashfn(x)
arr := c.pool.Get().(*[cacheSize]T)
idx := hash & cacheMask
v := (*arr)[idx]
if !c.eqfn(x, v) {
(*arr)[idx] = x
v = x
}
c.pool.Put(arr)
return v, hash
}
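// Illustrative usage sketch (added for this edit, not from the original source):
// deduplicating strings so that equal values share one cached instance. The
// hash and equality functions below are assumptions chosen for the example.
//
//	strs := New(
//		xxhash.Sum64String,
//		nil, // no skip function: cache every string
//		func(a, b string) bool { return a == b },
//	)
//	a := strs.Get(strings.Clone("k8s:app=frontend"))
//	b := strs.Get(strings.Clone("k8s:app=frontend"))
//	// a and b are equal; b is the instance cached by the first Get call,
//	// assuming no hash collision overwrote the slot in between.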
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package cache
import (
"maps"
"github.com/cespare/xxhash/v2"
)
var (
Strings = New(
xxhash.Sum64String,
func(s string) bool {
// Skip caching of long strings
return len(s) > 256
},
func(a, b string) bool { return a == b },
)
StringMaps = New(
func(m map[string]string) (hash uint64) {
for k, v := range m {
_, hashk := Strings.get(k)
_, hashv := Strings.get(v)
hash = hash ^ hashk ^ hashv
}
return
},
func(m map[string]string) bool {
// Skip caching of large maps
return len(m) > 32
},
maps.Equal,
)
)
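// Illustrative usage sketch (added for this edit, not from the original source):
// label maps that compare equal can be funneled through StringMaps so that
// repeated copies are replaced by one cached map.
//
//	labels := StringMaps.Get(map[string]string{"app": "frontend", "tier": "web"})
//	// A later Get with an equal map should return the cached instance above,
//	// provided its cache slot has not been overwritten in the meantime.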
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package safenetlink
import (
"context"
"errors"
"net"
"github.com/vishvananda/netlink"
"github.com/vishvananda/netlink/nl"
"github.com/cilium/cilium/pkg/resiliency"
"github.com/cilium/cilium/pkg/time"
)
const (
netlinkRetryInterval = 1 * time.Millisecond
netlinkRetryMax = 30
)
// WithRetry runs the netlinkFunc. If netlinkFunc returns netlink.ErrDumpInterrupted, the function is retried.
// If success or any other error is returned, WithRetry returns immediately, propagating the error.
func WithRetry(netlinkFunc func() error) error {
return resiliency.Retry(context.Background(), netlinkRetryInterval, netlinkRetryMax, func(ctx context.Context, retries int) (bool, error) {
err := netlinkFunc()
if errors.Is(err, netlink.ErrDumpInterrupted) {
return false, nil // retry
}
return true, err
})
}
// WithRetryResult works like WithRetry, but allows netlinkFunc to have a return value besides the error
func WithRetryResult[T any](netlinkFunc func() (T, error)) (out T, err error) {
err = WithRetry(func() error {
out, err = netlinkFunc()
return err
})
return out, err
}
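// Illustrative usage sketch (added for this edit, not from the original source):
// wrapping an arbitrary netlink dump with WithRetryResult. Most common calls
// already have dedicated wrappers below; this shows the generic form.
//
//	routes, err := WithRetryResult(func() ([]netlink.Route, error) {
//		return netlink.RouteListFiltered(netlink.FAMILY_V4, nil, 0)
//	})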
// AddrList wraps netlink.AddrList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func AddrList(link netlink.Link, family int) ([]netlink.Addr, error) {
return WithRetryResult(func() ([]netlink.Addr, error) {
return netlink.AddrList(link, family)
})
}
// BridgeVlanList wraps netlink.BridgeVlanList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) {
return WithRetryResult(func() (map[int32][]*nl.BridgeVlanInfo, error) {
return netlink.BridgeVlanList()
})
}
// ChainList wraps netlink.ChainList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func ChainList(link netlink.Link, parent uint32) ([]netlink.Chain, error) {
return WithRetryResult(func() ([]netlink.Chain, error) {
return netlink.ChainList(link, parent)
})
}
// ClassList wraps netlink.ClassList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func ClassList(link netlink.Link, parent uint32) ([]netlink.Class, error) {
return WithRetryResult(func() ([]netlink.Class, error) {
return netlink.ClassList(link, parent)
})
}
// ConntrackTableList wraps netlink.ConntrackTableList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func ConntrackTableList(table netlink.ConntrackTableType, family netlink.InetFamily) ([]*netlink.ConntrackFlow, error) {
return WithRetryResult(func() ([]*netlink.ConntrackFlow, error) {
return netlink.ConntrackTableList(table, family)
})
}
// DevLinkGetDeviceList wraps netlink.DevLinkGetDeviceList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func DevLinkGetDeviceList() ([]*netlink.DevlinkDevice, error) {
return WithRetryResult(func() ([]*netlink.DevlinkDevice, error) {
return netlink.DevLinkGetDeviceList()
})
}
// DevLinkGetAllPortList wraps netlink.DevLinkGetAllPortList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func DevLinkGetAllPortList() ([]*netlink.DevlinkPort, error) {
return WithRetryResult(func() ([]*netlink.DevlinkPort, error) {
return netlink.DevLinkGetAllPortList()
})
}
// DevlinkGetDeviceParams wraps netlink.DevlinkGetDeviceParams, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func DevlinkGetDeviceParams(bus string, device string) ([]*netlink.DevlinkParam, error) {
return WithRetryResult(func() ([]*netlink.DevlinkParam, error) {
return netlink.DevlinkGetDeviceParams(bus, device)
})
}
// FilterList wraps netlink.FilterList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func FilterList(link netlink.Link, parent uint32) ([]netlink.Filter, error) {
return WithRetryResult(func() ([]netlink.Filter, error) {
return netlink.FilterList(link, parent)
})
}
// FouList wraps netlink.FouList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func FouList(fam int) ([]netlink.Fou, error) {
return WithRetryResult(func() ([]netlink.Fou, error) {
return netlink.FouList(fam)
})
}
// GenlFamilyList wraps netlink.GenlFamilyList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func GenlFamilyList() ([]*netlink.GenlFamily, error) {
return WithRetryResult(func() ([]*netlink.GenlFamily, error) {
return netlink.GenlFamilyList()
})
}
// GTPPDPList wraps netlink.GTPPDPList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func GTPPDPList() ([]*netlink.PDP, error) {
return WithRetryResult(func() ([]*netlink.PDP, error) {
return netlink.GTPPDPList()
})
}
// LinkByName wraps netlink.LinkByName, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func LinkByName(name string) (netlink.Link, error) {
return WithRetryResult(func() (netlink.Link, error) {
return netlink.LinkByName(name)
})
}
// LinkByAlias wraps netlink.LinkByAlias, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func LinkByAlias(alias string) (netlink.Link, error) {
return WithRetryResult(func() (netlink.Link, error) {
return netlink.LinkByAlias(alias)
})
}
// LinkList wraps netlink.LinkList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func LinkList() ([]netlink.Link, error) {
return WithRetryResult(func() ([]netlink.Link, error) {
return netlink.LinkList()
})
}
// LinkSubscribeWithOptions wraps netlink.LinkSubscribeWithOptions, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func LinkSubscribeWithOptions(ch chan<- netlink.LinkUpdate, done <-chan struct{}, options netlink.LinkSubscribeOptions) error {
return WithRetry(func() error {
return netlink.LinkSubscribeWithOptions(ch, done, options)
})
}
// NeighList wraps netlink.NeighList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func NeighList(linkIndex, family int) ([]netlink.Neigh, error) {
return WithRetryResult(func() ([]netlink.Neigh, error) {
return netlink.NeighList(linkIndex, family)
})
}
// NeighProxyList wraps netlink.NeighProxyList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func NeighProxyList(linkIndex, family int) ([]netlink.Neigh, error) {
return WithRetryResult(func() ([]netlink.Neigh, error) {
return netlink.NeighProxyList(linkIndex, family)
})
}
// NeighListExecute wraps netlink.NeighListExecute, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func NeighListExecute(msg netlink.Ndmsg) ([]netlink.Neigh, error) {
return WithRetryResult(func() ([]netlink.Neigh, error) {
return netlink.NeighListExecute(msg)
})
}
// NeighSubscribeWithOptions wraps netlink.NeighSubscribeWithOptions, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func NeighSubscribeWithOptions(ch chan<- netlink.NeighUpdate, done <-chan struct{}, options netlink.NeighSubscribeOptions) error {
return WithRetry(func() error {
return netlink.NeighSubscribeWithOptions(ch, done, options)
})
}
// LinkGetProtinfo wraps netlink.LinkGetProtinfo, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func LinkGetProtinfo(link netlink.Link) (netlink.Protinfo, error) {
return WithRetryResult(func() (netlink.Protinfo, error) {
return netlink.LinkGetProtinfo(link)
})
}
// QdiscList wraps netlink.QdiscList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func QdiscList(link netlink.Link) ([]netlink.Qdisc, error) {
return WithRetryResult(func() ([]netlink.Qdisc, error) {
return netlink.QdiscList(link)
})
}
// RdmaLinkList wraps netlink.RdmaLinkList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func RdmaLinkList() ([]*netlink.RdmaLink, error) {
return WithRetryResult(func() ([]*netlink.RdmaLink, error) {
return netlink.RdmaLinkList()
})
}
// RdmaLinkByName wraps netlink.RdmaLinkByName, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func RdmaLinkByName(name string) (*netlink.RdmaLink, error) {
return WithRetryResult(func() (*netlink.RdmaLink, error) {
return netlink.RdmaLinkByName(name)
})
}
// RdmaLinkDel wraps netlink.RdmaLinkDel, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func RdmaLinkDel(name string) error {
return WithRetry(func() error {
return netlink.RdmaLinkDel(name)
})
}
// RouteList wraps netlink.RouteList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func RouteList(link netlink.Link, family int) ([]netlink.Route, error) {
return WithRetryResult(func() ([]netlink.Route, error) {
return netlink.RouteList(link, family)
})
}
// RouteListFiltered wraps netlink.RouteListFiltered, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func RouteListFiltered(family int, filter *netlink.Route, filterMask uint64) ([]netlink.Route, error) {
return WithRetryResult(func() ([]netlink.Route, error) {
return netlink.RouteListFiltered(family, filter, filterMask)
})
}
// RouteListFilteredIter wraps netlink.RouteListFilteredIter, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func RouteListFilteredIter(family int, filter *netlink.Route, filterMask uint64, f func(netlink.Route) (cont bool)) error {
return WithRetry(func() error {
return netlink.RouteListFilteredIter(family, filter, filterMask, f)
})
}
// RouteSubscribeWithOptions wraps netlink.RouteSubscribeWithOptions, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func RouteSubscribeWithOptions(ch chan<- netlink.RouteUpdate, done <-chan struct{}, options netlink.RouteSubscribeOptions) error {
return WithRetry(func() error {
return netlink.RouteSubscribeWithOptions(ch, done, options)
})
}
// RuleList wraps netlink.RuleList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func RuleList(family int) ([]netlink.Rule, error) {
return WithRetryResult(func() ([]netlink.Rule, error) {
return netlink.RuleList(family)
})
}
// RuleListFiltered wraps netlink.RuleListFiltered, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func RuleListFiltered(family int, filter *netlink.Rule, filterMask uint64) ([]netlink.Rule, error) {
return WithRetryResult(func() ([]netlink.Rule, error) {
return netlink.RuleListFiltered(family, filter, filterMask)
})
}
// SocketGet wraps netlink.SocketGet, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func SocketGet(local, remote net.Addr) (*netlink.Socket, error) {
return WithRetryResult(func() (*netlink.Socket, error) {
return netlink.SocketGet(local, remote)
})
}
// SocketDiagTCPInfo wraps netlink.SocketDiagTCPInfo, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func SocketDiagTCPInfo(family uint8) ([]*netlink.InetDiagTCPInfoResp, error) {
return WithRetryResult(func() ([]*netlink.InetDiagTCPInfoResp, error) {
return netlink.SocketDiagTCPInfo(family)
})
}
// SocketDiagTCP wraps netlink.SocketDiagTCP, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func SocketDiagTCP(family uint8) ([]*netlink.Socket, error) {
return WithRetryResult(func() ([]*netlink.Socket, error) {
return netlink.SocketDiagTCP(family)
})
}
// SocketDiagUDPInfo wraps netlink.SocketDiagUDPInfo, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func SocketDiagUDPInfo(family uint8) ([]*netlink.InetDiagUDPInfoResp, error) {
return WithRetryResult(func() ([]*netlink.InetDiagUDPInfoResp, error) {
return netlink.SocketDiagUDPInfo(family)
})
}
// SocketDiagUDP wraps netlink.SocketDiagUDP, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func SocketDiagUDP(family uint8) ([]*netlink.Socket, error) {
return WithRetryResult(func() ([]*netlink.Socket, error) {
return netlink.SocketDiagUDP(family)
})
}
// UnixSocketDiagInfo wraps netlink.UnixSocketDiagInfo, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func UnixSocketDiagInfo() ([]*netlink.UnixDiagInfoResp, error) {
return WithRetryResult(func() ([]*netlink.UnixDiagInfoResp, error) {
return netlink.UnixSocketDiagInfo()
})
}
// UnixSocketDiag wraps netlink.UnixSocketDiag, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func UnixSocketDiag() ([]*netlink.UnixSocket, error) {
return WithRetryResult(func() ([]*netlink.UnixSocket, error) {
return netlink.UnixSocketDiag()
})
}
// SocketXDPGetInfo wraps netlink.SocketXDPGetInfo, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func SocketXDPGetInfo(ino uint32, cookie uint64) (*netlink.XDPDiagInfoResp, error) {
return WithRetryResult(func() (*netlink.XDPDiagInfoResp, error) {
return netlink.SocketXDPGetInfo(ino, cookie)
})
}
// SocketDiagXDP wraps netlink.SocketDiagXDP, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func SocketDiagXDP() ([]*netlink.XDPDiagInfoResp, error) {
return WithRetryResult(func() ([]*netlink.XDPDiagInfoResp, error) {
return netlink.SocketDiagXDP()
})
}
// VDPAGetDevList wraps netlink.VDPAGetDevList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func VDPAGetDevList() ([]*netlink.VDPADev, error) {
return WithRetryResult(func() ([]*netlink.VDPADev, error) {
return netlink.VDPAGetDevList()
})
}
// VDPAGetDevConfigList wraps netlink.VDPAGetDevConfigList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func VDPAGetDevConfigList() ([]*netlink.VDPADevConfig, error) {
return WithRetryResult(func() ([]*netlink.VDPADevConfig, error) {
return netlink.VDPAGetDevConfigList()
})
}
// VDPAGetMGMTDevList wraps netlink.VDPAGetMGMTDevList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func VDPAGetMGMTDevList() ([]*netlink.VDPAMGMTDev, error) {
return WithRetryResult(func() ([]*netlink.VDPAMGMTDev, error) {
return netlink.VDPAGetMGMTDevList()
})
}
// XfrmPolicyList wraps netlink.XfrmPolicyList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func XfrmPolicyList(family int) ([]netlink.XfrmPolicy, error) {
return WithRetryResult(func() ([]netlink.XfrmPolicy, error) {
return netlink.XfrmPolicyList(family)
})
}
// XfrmStateList wraps netlink.XfrmStateList, but retries the call automatically
// if netlink.ErrDumpInterrupted is returned
func XfrmStateList(family int) ([]netlink.XfrmState, error) {
return WithRetryResult(func() ([]netlink.XfrmState, error) {
return netlink.XfrmStateList(family)
})
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Based on code from github.com/miekg/dns which is:
//
// Copyright 2009 The Go Authors. All rights reserved.
// Copyright 2011 Miek Gieben. All rights reserved.
// Copyright 2014 CloudFlare. All rights reserved.
package dns
import "strings"
// These functions were copied and adapted from github.com/miekg/dns.
// isFQDN reports whether the domain name s is fully qualified.
func isFQDN(s string) bool {
s2 := strings.TrimSuffix(s, ".")
if s == s2 {
return false
}
i := strings.LastIndexFunc(s2, func(r rune) bool {
return r != '\\'
})
// Test whether we have an even number of escape sequences before
// the dot or none.
return (len(s2)-i)%2 != 0
}
// FQDN returns the fully qualified domain name from s.
// If s is already fully qualified, it behaves as the identity function.
func FQDN(s string) string {
if isFQDN(s) {
return strings.ToLower(s)
}
return strings.ToLower(s) + "."
}
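// Illustrative behaviour sketch (added for this edit, not from the original source):
//
//	FQDN("Example.COM")  // "example.com."  (lower-cased and terminated with a dot)
//	FQDN("example.com.") // "example.com."  (already fully qualified, only lower-cased)
//	isFQDN(`example\.`)  // false: the trailing dot is escaped, so it is not a root dot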
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package matchpattern
import (
"errors"
"regexp"
"strings"
"github.com/cilium/cilium/pkg/fqdn/dns"
"github.com/cilium/cilium/pkg/fqdn/re"
)
const allowedDNSCharsREGroup = "[-a-zA-Z0-9_]"
// MatchAllAnchoredPattern is the simplest pattern that matches all inputs. The resulting
// parsed regular expression is equivalent to an empty string regex (""), but this
// value is easier to reason about when serializing to and from json.
const MatchAllAnchoredPattern = "(?:)"
// MatchAllUnAnchoredPattern is the same as MatchAllAnchoredPattern, except that
// it can be or-ed (joined with "|") with other expressions, and still match all inputs.
const MatchAllUnAnchoredPattern = ".*"
// Validate ensures that pattern is a parsable matchPattern. It returns the
// regexp generated when validating.
func Validate(pattern string) (matcher *regexp.Regexp, err error) {
if err := prevalidate(pattern); err != nil {
return nil, err
}
return re.CompileRegex(ToAnchoredRegexp(pattern))
}
// ValidateWithoutCache is the same as Validate() but doesn't consult the regex
// LRU.
func ValidateWithoutCache(pattern string) (matcher *regexp.Regexp, err error) {
if err := prevalidate(pattern); err != nil {
return nil, err
}
return regexp.Compile(ToAnchoredRegexp(pattern))
}
func prevalidate(pattern string) error {
pattern = strings.TrimSpace(pattern)
pattern = strings.ToLower(pattern)
// error check
if strings.ContainsAny(pattern, "[]+{},") {
return errors.New(`Only alphanumeric ASCII characters, the hyphen "-", underscore "_", "." and "*" are allowed in a matchPattern`)
}
return nil
}
// Sanitize canonicalizes the pattern for use by ToAnchoredRegexp.
func Sanitize(pattern string) string {
if pattern == "*" {
return pattern
}
return dns.FQDN(pattern)
}
// ToAnchoredRegexp converts a MatchPattern field into a regexp string. It does not
// validate the pattern. It also adds anchors to ensure it matches the whole string.
// It supports:
// * to select 0 or more DNS valid characters
func ToAnchoredRegexp(pattern string) string {
pattern = strings.TrimSpace(pattern)
pattern = strings.ToLower(pattern)
// handle the * match-all case. This will filter down to the end.
if pattern == "*" {
return "(^(" + allowedDNSCharsREGroup + "+[.])+$)|(^[.]$)"
}
pattern = escapeRegexpCharacters(pattern)
// Anchor the match to require the whole string to match this expression
return "^" + pattern + "$"
}
// ToUnAnchoredRegexp converts a MatchPattern field into a regexp string. It does not
// validate the pattern. It does not add regexp anchors.
// It supports:
// * to select 0 or more DNS valid characters
func ToUnAnchoredRegexp(pattern string) string {
pattern = strings.TrimSpace(pattern)
pattern = strings.ToLower(pattern)
// handle the * match-all case. This will filter down to the end.
if pattern == "*" {
return MatchAllUnAnchoredPattern
}
pattern = escapeRegexpCharacters(pattern)
return pattern
}
func escapeRegexpCharacters(pattern string) string {
// base case. "." becomes a literal .
pattern = strings.Replace(pattern, ".", "[.]", -1)
// base case. * becomes .*, but only for DNS valid characters
// NOTE: this only works because the case above does not leave the *
pattern = strings.Replace(pattern, "*", allowedDNSCharsREGroup+"*", -1)
return pattern
}
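// Illustrative usage sketch (added for this edit, not from the original source):
// turning a matchPattern into an anchored regular expression.
//
//	p := Sanitize("*.cilium.io") // "*.cilium.io." (FQDN form, lower-cased)
//	expr := ToAnchoredRegexp(p)  // "^[-a-zA-Z0-9_]*[.]cilium[.]io[.]$"
//	rx, err := Validate(p)       // compiles expr via the shared regex LRU
//	// rx.MatchString("www.cilium.io.") should be true when err == nil; Validate
//	// requires the LRU in pkg/fqdn/re to have been initialized first.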
// Copyright 2022 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package matchpattern
func FuzzMatchpatternValidate(data []byte) int {
_, _ = Validate(string(data))
return 1
}
func FuzzMatchpatternValidateWithoutCache(data []byte) int {
_, _ = ValidateWithoutCache(string(data))
return 1
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Package re provides a simple function to access compiled regex objects for
// the FQDN subsystem.
package re
import (
"errors"
"fmt"
"regexp"
"sync/atomic"
lru "github.com/golang/groupcache/lru"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/option"
)
var (
log = logging.DefaultLogger.WithField(logfields.LogSubsys, "fqdn/re")
)
// CompileRegex compiles a pattern p into a regex and returns the regex object.
// The regex object will be cached by an LRU. If p has already been compiled
// and cached, this function will return the cached regex object. If not
// already cached, it will compile p into a regex object and cache it in the
// LRU. This function will return an error if the LRU has not already been
// initialized.
func CompileRegex(p string) (*regexp.Regexp, error) {
lru := regexCompileLRU.Load()
if lru == nil {
return nil, errors.New("FQDN regex compilation LRU not yet initialized")
}
lru.Lock()
r, ok := lru.Get(p)
lru.Unlock()
if ok {
return r.(*regexp.Regexp), nil
}
n, err := regexp.Compile(p)
if err != nil {
return nil, fmt.Errorf("failed to compile regex: %w", err)
}
lru.Lock()
lru.Add(p, n)
lru.Unlock()
return n, nil
}
// InitRegexCompileLRU creates a new instance of the regex compilation LRU.
func InitRegexCompileLRU(size int) error {
if size < 0 {
return fmt.Errorf("failed to initialize FQDN regex compilation LRU due to invalid size %d", size)
} else if size == 0 {
log.Warnf(
"FQDN regex compilation LRU size is unlimited, which can grow unbounded potentially consuming too much memory. Consider passing a maximum size via --%s.",
option.FQDNRegexCompileLRUSize)
}
regexCompileLRU.Store(&RegexCompileLRU{
Mutex: &lock.Mutex{},
Cache: lru.New(size),
})
return nil
}
// regexCompileLRU is the singleton instance of the LRU that's shared
// throughout Cilium.
var regexCompileLRU atomic.Pointer[RegexCompileLRU]
// RegexCompileLRU is an LRU cache for storing compiled regex objects of FQDN
// names or patterns, used in CiliumNetworkPolicy or
// ClusterwideCiliumNetworkPolicy.
type RegexCompileLRU struct {
// The lru package doesn't provide any concurrency guarantees so we must
// provide our own locking.
*lock.Mutex
*lru.Cache
}
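// Illustrative usage sketch (added for this edit, not from the original source):
// the LRU must be initialized once (normally from agent configuration) before
// CompileRegex can serve cached compilations. The size below is arbitrary.
//
//	if err := InitRegexCompileLRU(1024); err != nil {
//		// handle the error
//	}
//	rx, err := CompileRegex(`^[-a-zA-Z0-9_]*[.]cilium[.]io[.]$`)
//	// A second CompileRegex with the same pattern returns the cached *regexp.Regexp.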
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package ip
import (
"net"
"net/netip"
"go4.org/netipx"
)
// ParseCIDRs fetches all CIDRs referred to by the specified slice and returns
// them as regular golang CIDR objects.
//
// Deprecated: Consider using ParsePrefixes() instead.
func ParseCIDRs(cidrs []string) (valid []*net.IPNet, invalid []string) {
valid = make([]*net.IPNet, 0, len(cidrs))
invalid = make([]string, 0, len(cidrs))
for _, cidr := range cidrs {
_, prefix, err := net.ParseCIDR(cidr)
if err != nil {
// Likely the CIDR is specified in host format.
ip := net.ParseIP(cidr)
if ip == nil {
invalid = append(invalid, cidr)
continue
} else {
prefix = IPToPrefix(ip)
}
}
if prefix != nil {
valid = append(valid, prefix)
}
}
return valid, invalid
}
// ParsePrefixes parses all CIDRs referred to by the specified slice and
// returns them as regular golang netip.Prefix objects.
func ParsePrefixes(cidrs []string) (valid []netip.Prefix, invalid []string, errors []error) {
valid = make([]netip.Prefix, 0, len(cidrs))
invalid = make([]string, 0, len(cidrs))
errors = make([]error, 0, len(cidrs))
for _, cidr := range cidrs {
prefix, err := netip.ParsePrefix(cidr)
if err != nil {
ip, err2 := netip.ParseAddr(cidr)
if err2 != nil {
invalid = append(invalid, cidr)
errors = append(errors, err2)
continue
}
prefix = netip.PrefixFrom(ip, ip.BitLen())
}
valid = append(valid, prefix.Masked())
}
return valid, invalid, errors
}
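// Illustrative usage sketch (added for this edit, not from the original source):
// bare addresses are accepted and widened to full-length prefixes, while
// unparsable entries are reported in the invalid/errors slices.
//
//	valid, invalid, errs := ParsePrefixes([]string{"10.0.0.0/8", "192.0.2.1", "not-a-cidr"})
//	// valid:   [10.0.0.0/8 192.0.2.1/32]
//	// invalid: ["not-a-cidr"], with the corresponding parse error in errs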
// IPToNetPrefix is a convenience helper for migrating from the older 'net'
// standard library types to the newer 'netip' types. Use this to plug the new
// types in newer code into older types in older code during the migration.
//
// Note: This function assumes given ip is not an IPv4 mapped IPv6 address.
//
// The problem behind this is that when we convert the IPv4 net.IP address with
// netip.AddrFromSlice, the address is interpreted as an IPv4 mapped IPv6 address in some
// cases.
//
// For example, when we do netip.AddrFromSlice(net.ParseIP("1.1.1.1")), it is interpreted
// as an IPv6 address "::ffff:1.1.1.1". This is because 1) net.IP created with
// net.ParseIP(IPv4 string) holds IPv4 address as an IPv4 mapped IPv6 address internally
// and 2) netip.AddrFromSlice recognizes address family with length of the slice (4-byte =
// IPv4 and 16-byte = IPv6).
//
// By using netipx.FromStdIP, we can preserve the address family, but since we cannot distinguish
// IPv4 and IPv4 mapped IPv6 address only from net.IP value (see #37921 on golang/go) we
// need an assumption that given net.IP is not an IPv4 mapped IPv6 address.
func IPToNetPrefix(ip net.IP) netip.Prefix {
a, ok := netipx.FromStdIP(ip)
if !ok {
return netip.Prefix{}
}
return netip.PrefixFrom(a, a.BitLen())
}
// IPsToNetPrefixes returns all of the ips as a slice of netip.Prefix.
//
// See IPToNetPrefix() for how net.IP types are handled by this function.
func IPsToNetPrefixes(ips []net.IP) []netip.Prefix {
if len(ips) == 0 {
return nil
}
res := make([]netip.Prefix, 0, len(ips))
for _, ip := range ips {
res = append(res, IPToNetPrefix(ip))
}
return res
}
// NetsContainsAny checks that any subnet in the `a` subnet group *fully*
// contains any of the subnets in the `b` subnet group.
func NetsContainsAny(a, b []*net.IPNet) bool {
for _, an := range a {
aMask, _ := an.Mask.Size()
aIsIPv4 := an.IP.To4() != nil
for _, bn := range b {
bIsIPv4 := bn.IP.To4() != nil
isSameFamily := aIsIPv4 == bIsIPv4
if isSameFamily {
bMask, _ := bn.Mask.Size()
if bMask >= aMask && an.Contains(bn.IP) {
return true
}
}
}
}
return false
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package ip
import (
"bytes"
"encoding/binary"
"math/big"
"net"
"net/netip"
"slices"
"sort"
"go4.org/netipx"
)
const (
ipv4BitLen = 8 * net.IPv4len
ipv6BitLen = 8 * net.IPv6len
)
// CountIPsInCIDR takes a RFC4632/RFC4291-formatted IPv4/IPv6 CIDR and
// determines how many IP addresses reside within that CIDR.
// The first and the last (base and broadcast) IPs are excluded.
//
// Returns 0 if the mask is malformed or if the prefix covers only a single address (/32 or /128).
func CountIPsInCIDR(ipnet *net.IPNet) *big.Int {
subnet, size := ipnet.Mask.Size()
if subnet == size {
return big.NewInt(0)
}
return big.NewInt(0).
Sub(
big.NewInt(2).Exp(big.NewInt(2),
big.NewInt(int64(size-subnet)), nil),
big.NewInt(2),
)
}
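// Illustrative behaviour sketch (added for this edit, not from the original source):
//
//	_, cidr, _ := net.ParseCIDR("192.0.2.0/24")
//	CountIPsInCIDR(cidr) // 254: 2^8 addresses minus the base and broadcast IPs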
var (
// v4Mappedv6Prefix is the RFC4291 IPv4-mapped IPv6 address prefix (::ffff:0:0/96).
v4Mappedv6Prefix = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff}
ipv4LeadingZeroes = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
defaultIPv4 = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0}
defaultIPv6 = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
upperIPv4 = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 255, 255, 255, 255}
upperIPv6 = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
)
// NetsByMask is used to sort a list of IP networks by the size of their masks.
// Implements sort.Interface.
type NetsByMask []*net.IPNet
func (s NetsByMask) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s NetsByMask) Less(i, j int) bool {
iPrefixSize, _ := s[i].Mask.Size()
jPrefixSize, _ := s[j].Mask.Size()
if iPrefixSize == jPrefixSize {
return bytes.Compare(s[i].IP, s[j].IP) < 0
}
return iPrefixSize < jPrefixSize
}
func (s NetsByMask) Len() int {
return len(s)
}
// Assert that NetsByMask implements sort.Interface.
var _ sort.Interface = NetsByMask{}
var _ sort.Interface = NetsByRange{}
// NetsByRange is used to sort a list of ranges, first by their last IPs, then by
// their first IPs
// Implements sort.Interface.
type NetsByRange []*netWithRange
func (s NetsByRange) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s NetsByRange) Less(i, j int) bool {
// First compare by last IP.
lastComparison := bytes.Compare(*s[i].Last, *s[j].Last)
if lastComparison < 0 {
return true
} else if lastComparison > 0 {
return false
}
// Then compare by first IP.
firstComparison := bytes.Compare(*s[i].First, *s[j].First)
if firstComparison < 0 {
return true
} else if firstComparison > 0 {
return false
}
// First and last IPs are the same, so thus are equal, and s[i]
// is not less than s[j].
return false
}
func (s NetsByRange) Len() int {
return len(s)
}
// removeRedundantCIDRs removes CIDRs which are contained within other given CIDRs.
func removeRedundantCIDRs(CIDRs []*net.IPNet) []*net.IPNet {
redundant := make(map[int]bool)
for j, CIDR := range CIDRs {
if redundant[j] {
continue // Skip redundant CIDRs
}
for i, CIDR2 := range CIDRs {
// Skip checking CIDR against itself or if CIDR has already been deemed redundant.
if i == j || redundant[i] {
continue
}
if CIDR.Contains(CIDR2.IP) {
redundant[i] = true
}
}
}
if len(redundant) == 0 {
return CIDRs
}
if len(redundant) == 1 {
for i := range redundant {
return append(CIDRs[:i], CIDRs[i+1:]...)
}
}
newCIDRs := make([]*net.IPNet, 0, len(CIDRs)-len(redundant))
for i := range CIDRs {
if redundant[i] {
continue
}
newCIDRs = append(newCIDRs, CIDRs[i])
}
return newCIDRs
}
// RemoveCIDRs removes the specified CIDRs from another set of CIDRs. If a CIDR
// to remove is not contained within the CIDR, the CIDR to remove is ignored. A
// slice of CIDRs is returned which contains the set of CIDRs provided minus
// the set of CIDRs which were removed. Both input slices may be modified by
// calling this function.
func RemoveCIDRs(allowCIDRs, removeCIDRs []*net.IPNet) []*net.IPNet {
// Ensure that we iterate through the provided CIDRs in order of largest
// subnet first.
sort.Sort(NetsByMask(removeCIDRs))
// Remove CIDRs which are contained within CIDRs that we want to remove;
// such CIDRs are redundant.
removeCIDRs = removeRedundantCIDRs(removeCIDRs)
// Remove redundant allowCIDR so that all allowCIDRs are disjoint
allowCIDRs = removeRedundantCIDRs(allowCIDRs)
for _, remove := range removeCIDRs {
i := 0
for i < len(allowCIDRs) {
allowCIDR := allowCIDRs[i]
// Only remove CIDR if it is contained in the subnet we are allowing.
if allowCIDR.Contains(remove.IP.Mask(remove.Mask)) {
nets := excludeContainedCIDR(allowCIDR, remove)
// Remove CIDR that we have just processed and append new CIDRs
// that we computed from removing the CIDR to remove.
allowCIDRs = append(allowCIDRs[:i], allowCIDRs[i+1:]...)
allowCIDRs = append(allowCIDRs, nets...)
} else if remove.Contains(allowCIDR.IP.Mask(allowCIDR.Mask)) {
// If a CIDR that we want to remove contains a CIDR in the list
// that is allowed, then we can just remove the CIDR to allow.
allowCIDRs = append(allowCIDRs[:i], allowCIDRs[i+1:]...)
} else {
// Advance only if CIDR at index 'i' was not removed
i++
}
}
}
return allowCIDRs
}
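// Illustrative usage sketch (added for this edit, not from the original source):
// removing the lower half of an allowed /24 leaves only the upper /25.
//
//	_, allow, _ := net.ParseCIDR("10.0.0.0/24")
//	_, deny, _ := net.ParseCIDR("10.0.0.0/25")
//	remaining := RemoveCIDRs([]*net.IPNet{allow}, []*net.IPNet{deny})
//	// remaining should be equivalent to [10.0.0.128/25]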
func getNetworkPrefix(ipNet *net.IPNet) *net.IP {
var mask net.IP
if ipNet.IP.To4() == nil {
mask = make(net.IP, net.IPv6len)
for i := 0; i < len(ipNet.Mask); i++ {
mask[net.IPv6len-i-1] = ipNet.IP[net.IPv6len-i-1] & ^ipNet.Mask[i]
}
} else {
mask = make(net.IP, net.IPv4len)
for i := 0; i < net.IPv4len; i++ {
mask[net.IPv4len-i-1] = ipNet.IP[net.IPv6len-i-1] & ^ipNet.Mask[i]
}
}
return &mask
}
// excludeContainedCIDR returns a set of CIDRs that is equivalent to 'allowCIDR'
// except for 'removeCIDR', which must be a subset of 'allowCIDR'.
// Caller is responsible for only passing CIDRs of the same address family.
func excludeContainedCIDR(allowCIDR, removeCIDR *net.IPNet) []*net.IPNet {
// Get size of each CIDR mask.
allowSize, addrSize := allowCIDR.Mask.Size()
removeSize, _ := removeCIDR.Mask.Size()
// Removing a CIDR from itself should result in an empty set
if allowSize == removeSize && allowCIDR.IP.Equal(removeCIDR.IP) {
return nil
}
removeIPMasked := removeCIDR.IP.Mask(removeCIDR.Mask)
// Create CIDR prefixes with mask size of Y+1, Y+2 ... X where Y is the mask
// length of the CIDR prefix of allowCIDR from which we are excluding the CIDR
// prefix removeCIDR with mask length X.
allows := make([]*net.IPNet, 0, removeSize-allowSize)
// Scan bits from high to low, where 0th bit is the highest.
// For example, an allowCIDR of size 16 covers bits 0..15,
// so the new bit in the first new mask is the 16th bit, giving a mask size of 17.
for bit := allowSize; bit < removeSize; bit++ {
newMaskSize := bit + 1 // bit numbering starts from 0, 0th bit needs mask of size 1
// The mask for each CIDR prefix is simply the masked removeCIDR with the lowest bit
// within the new mask size flipped.
newMask := net.CIDRMask(newMaskSize, addrSize)
newIPMasked := removeIPMasked.Mask(newMask)
flipNthHighestBit(newIPMasked, uint(bit))
newIPNet := net.IPNet{IP: newIPMasked, Mask: newMask}
allows = append(allows, &newIPNet)
}
return allows
}
// Flip the 'n'th highest bit in 'ip'. 'ip' is modified in place. 'n' is zero indexed.
func flipNthHighestBit(ip net.IP, n uint) {
i := n / 8
ip[i] = ip[i] ^ 0x80>>(n%8)
}
func ipNetToRange(ipNet net.IPNet) netWithRange {
firstIP := make(net.IP, len(ipNet.IP))
lastIP := make(net.IP, len(ipNet.IP))
copy(firstIP, ipNet.IP)
copy(lastIP, ipNet.IP)
firstIP = firstIP.Mask(ipNet.Mask)
lastIP = lastIP.Mask(ipNet.Mask)
if firstIP.To4() != nil {
firstIP = append(v4Mappedv6Prefix, firstIP...)
lastIP = append(v4Mappedv6Prefix, lastIP...)
}
lastIPMask := make(net.IPMask, len(ipNet.Mask))
copy(lastIPMask, ipNet.Mask)
for i := range lastIPMask {
lastIPMask[len(lastIPMask)-i-1] = ^lastIPMask[len(lastIPMask)-i-1]
lastIP[net.IPv6len-i-1] = lastIP[net.IPv6len-i-1] | lastIPMask[len(lastIPMask)-i-1]
}
return netWithRange{First: &firstIP, Last: &lastIP, Network: &ipNet}
}
// PrefixCeil converts the given number of IPs to the minimum number of prefixes needed to host those IPs.
// multiple indicates the number of IPs in a single prefix.
func PrefixCeil(numIPs int, multiple int) int {
if numIPs == 0 {
return 0
}
quotient := numIPs / multiple
rem := numIPs % multiple
if rem > 0 {
return quotient + 1
}
return quotient
}
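// Illustrative behaviour sketch (added for this edit, not from the original source):
//
//	PrefixCeil(10, 4) // 3: ten IPs need three prefixes of four IPs each
//	PrefixCeil(8, 4)  // 2
//	PrefixCeil(0, 4)  // 0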
// PrefixToIps converts the given prefix to an array containing IPs in the provided
// prefix/CIDR block. When maxIPs is set to 0, the returned array will contain all IPs
// in the given prefix. Otherwise, the returned array of IPs will be limited to the
// value of maxIPs starting at the first IP in the provided CIDR. For example, when
// providing 192.168.1.0/28 as a CIDR with 4 maxIPs, 192.168.1.0, 192.168.1.1,
// 192.168.1.2, 192.168.1.3 will be returned.
func PrefixToIps(prefixCidr string, maxIPs int) ([]string, error) {
var prefixIps []string
_, ipNet, err := net.ParseCIDR(prefixCidr)
if err != nil {
return prefixIps, err
}
netWithRange := ipNetToRange(*ipNet)
// Ensure last IP in the prefix is included
for ip := *netWithRange.First; len(prefixIps) < maxIPs || maxIPs == 0; ip = getNextIP(ip) {
prefixIps = append(prefixIps, ip.String())
if ip.Equal(*netWithRange.Last) {
break
}
}
return prefixIps, nil
}
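// Illustrative usage sketch (added for this edit, not from the original source),
// matching the example given in the comment above:
//
//	ips, err := PrefixToIps("192.168.1.0/28", 4)
//	// ips: ["192.168.1.0" "192.168.1.1" "192.168.1.2" "192.168.1.3"] when err == nil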
// GetIPAtIndex returns the IP at the given index within the range of ipNet. Indexing
// starts at 0; a negative index counts backwards from the last IP in the range.
func GetIPAtIndex(ipNet net.IPNet, index int64) net.IP {
netRange := ipNetToRange(ipNet)
val := big.NewInt(0)
var ip net.IP
if index >= 0 {
ip = *netRange.First
} else {
ip = *netRange.Last
index++
}
if ip.To4() != nil {
val.SetBytes(ip.To4())
} else {
val.SetBytes(ip)
}
val.Add(val, big.NewInt(index))
if ipNet.Contains(val.Bytes()) {
return val.Bytes()
}
return nil
}
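// Illustrative behaviour sketch (added for this edit, not from the original source):
//
//	_, cidr, _ := net.ParseCIDR("10.0.0.0/24")
//	GetIPAtIndex(*cidr, 1)  // 10.0.0.1
//	GetIPAtIndex(*cidr, -1) // 10.0.0.255 (negative indices count back from the last IP)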
func getPreviousIP(ip net.IP) net.IP {
// Cannot go lower than zero!
if ip.Equal(defaultIPv4) || ip.Equal(defaultIPv6) {
return ip
}
previousIP := make(net.IP, len(ip))
copy(previousIP, ip)
var overflow bool
var lowerByteBound int
if ip.To4() != nil {
lowerByteBound = net.IPv6len - net.IPv4len
} else {
lowerByteBound = 0
}
for i := len(ip) - 1; i >= lowerByteBound; i-- {
if overflow || i == len(ip)-1 {
previousIP[i]--
}
// Track if we have overflowed and thus need to continue subtracting.
if ip[i] == 0 && previousIP[i] == 255 {
overflow = true
} else {
overflow = false
}
}
return previousIP
}
// getNextIP returns the next IP from the given IP address. If the given IP is
// the last IP of a v4 or v6 range, the same IP is returned.
func getNextIP(ip net.IP) net.IP {
if ip.Equal(upperIPv4) || ip.Equal(upperIPv6) {
return ip
}
nextIP := make(net.IP, len(ip))
switch len(ip) {
case net.IPv4len:
ipU32 := binary.BigEndian.Uint32(ip)
ipU32++
binary.BigEndian.PutUint32(nextIP, ipU32)
return nextIP
case net.IPv6len:
ipU64 := binary.BigEndian.Uint64(ip[net.IPv6len/2:])
ipU64++
binary.BigEndian.PutUint64(nextIP[net.IPv6len/2:], ipU64)
if ipU64 == 0 {
ipU64 = binary.BigEndian.Uint64(ip[:net.IPv6len/2])
ipU64++
binary.BigEndian.PutUint64(nextIP[:net.IPv6len/2], ipU64)
} else {
copy(nextIP[:net.IPv6len/2], ip[:net.IPv6len/2])
}
return nextIP
default:
return ip
}
}
func createSpanningCIDR(r netWithRange) net.IPNet {
// Don't want to modify the values of the provided range, so make copies.
lowest := *r.First
highest := *r.Last
var isIPv4 bool
var spanningMaskSize, bitLen, byteLen int
if lowest.To4() != nil {
isIPv4 = true
bitLen = ipv4BitLen
byteLen = net.IPv4len
} else {
bitLen = ipv6BitLen
byteLen = net.IPv6len
}
if isIPv4 {
spanningMaskSize = ipv4BitLen
} else {
spanningMaskSize = ipv6BitLen
}
// Convert to big Int so we can easily do bitshifting on the IP addresses,
// since golang only provides up to 64-bit unsigned integers.
lowestBig := big.NewInt(0).SetBytes(lowest)
highestBig := big.NewInt(0).SetBytes(highest)
// Starting from largest mask / smallest range possible, apply a mask one bit
// larger in each iteration to the upper bound in the range until we have
// masked enough to pass the lower bound in the range. This
// gives us the size of the prefix for the spanning CIDR to return as
// well as the IP for the CIDR prefix of the spanning CIDR.
for spanningMaskSize > 0 && lowestBig.Cmp(highestBig) < 0 {
spanningMaskSize--
mask := big.NewInt(1)
mask = mask.Lsh(mask, uint(bitLen-spanningMaskSize))
mask = mask.Mul(mask, big.NewInt(-1))
highestBig = highestBig.And(highestBig, mask)
}
// If IPv4, prepend zero bytes because math/big strips leading zeroes.
if isIPv4 {
highest = append(ipv4LeadingZeroes, highestBig.Bytes()...)
} else {
highest = highestBig.Bytes()
}
// Int does not store leading zeroes.
if len(highest) == 0 {
highest = make([]byte, byteLen)
}
newNet := net.IPNet{IP: highest, Mask: net.CIDRMask(spanningMaskSize, bitLen)}
return newNet
}
type netWithRange struct {
First *net.IP
Last *net.IP
Network *net.IPNet
}
func mergeAdjacentCIDRs(ranges []*netWithRange) []*netWithRange {
// Sort the ranges. This sorts first by the last IP, then by the first IP
// of each range.
sort.Sort(NetsByRange(ranges))
// Merge adjacent CIDRs if possible.
for i := len(ranges) - 1; i > 0; i-- {
first1 := getPreviousIP(*ranges[i].First)
// Since the networks are sorted, we know that if a network in the list
// is adjacent to another one in the list, it will be the network next
// to it in the list. If the previous IP of the current network we are
// processing overlaps with the last IP of the previous network in the
// list, then we can merge the two ranges together.
if bytes.Compare(first1, *ranges[i-1].Last) <= 0 {
// Pick the minimum of the first two IPs to represent the start
// of the new range.
var minFirstIP *net.IP
if bytes.Compare(*ranges[i-1].First, *ranges[i].First) < 0 {
minFirstIP = ranges[i-1].First
} else {
minFirstIP = ranges[i].First
}
// Always take the last IP of the ith range (the ranges are sorted by last IP).
newRangeLast := make(net.IP, len(*ranges[i].Last))
copy(newRangeLast, *ranges[i].Last)
newRangeFirst := make(net.IP, len(*minFirstIP))
copy(newRangeFirst, *minFirstIP)
// Can't set the network field because we are combining a range of
// IPs, and we don't yet know what CIDR prefix(es) represent the new
// range.
ranges[i-1] = &netWithRange{First: &newRangeFirst, Last: &newRangeLast, Network: nil}
// Since we have combined ranges[i] with the preceding item in the
// ranges list, we can delete ranges[i] from the slice.
ranges = append(ranges[:i], ranges[i+1:]...)
}
}
return ranges
}
// coalesceRanges converts ranges into an equivalent list of net.IPNets.
// All IPs in ranges should be of the same address family (IPv4 or IPv6).
func coalesceRanges(ranges []*netWithRange) []*net.IPNet {
coalescedCIDRs := []*net.IPNet{}
// Create CIDRs from ranges that were combined if needed.
for _, netRange := range ranges {
// If the Network field of netWithRange wasn't modified, then we can
// add it to the list which we will return, as it cannot be joined with
// any other CIDR in the list.
if netRange.Network != nil {
coalescedCIDRs = append(coalescedCIDRs, netRange.Network)
} else {
// We have joined two ranges together, so we need to find the new CIDRs
// that represent this range.
rangeCIDRs := rangeToCIDRs(*netRange.First, *netRange.Last)
coalescedCIDRs = append(coalescedCIDRs, rangeCIDRs...)
}
}
return coalescedCIDRs
}
// CoalesceCIDRs transforms the provided list of CIDRs into the most-minimal
// equivalent set of IPv4 and IPv6 CIDRs.
// It removes CIDRs that are subnets of other CIDRs in the list, and groups
// together CIDRs that have the same mask size into a CIDR of the same mask
// size provided that they share the same number of most significant
// mask-size bits.
//
// Note: this algorithm was ported from the Python library netaddr.
// https://github.com/drkjam/netaddr .
func CoalesceCIDRs(cidrs []*net.IPNet) ([]*net.IPNet, []*net.IPNet) {
ranges4 := []*netWithRange{}
ranges6 := []*netWithRange{}
for _, network := range cidrs {
newNetToRange := ipNetToRange(*network)
if network.IP.To4() != nil {
ranges4 = append(ranges4, &newNetToRange)
} else {
ranges6 = append(ranges6, &newNetToRange)
}
}
return coalesceRanges(mergeAdjacentCIDRs(ranges4)), coalesceRanges(mergeAdjacentCIDRs(ranges6))
}
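// Illustrative usage sketch (added for this edit, not from the original source):
// two adjacent /25 networks collapse into their covering /24.
//
//	_, a, _ := net.ParseCIDR("10.0.0.0/25")
//	_, b, _ := net.ParseCIDR("10.0.0.128/25")
//	v4, v6 := CoalesceCIDRs([]*net.IPNet{a, b})
//	// v4 should be equivalent to [10.0.0.0/24]; v6 is empty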
// rangeToCIDRs converts the range of IPs covered by firstIP and lastIP to
// a list of CIDRs that contains all of the IPs covered by the range.
func rangeToCIDRs(firstIP, lastIP net.IP) []*net.IPNet {
// First, create a CIDR that spans both IPs.
spanningCIDR := createSpanningCIDR(netWithRange{&firstIP, &lastIP, nil})
spanningRange := ipNetToRange(spanningCIDR)
firstIPSpanning := spanningRange.First
lastIPSpanning := spanningRange.Last
cidrList := []*net.IPNet{}
// If the first IP of the spanning CIDR falls below the lower bound (firstIP),
// we need to split the spanning CIDR and only take the IPs that are
// greater than the value which we split on, as we do not want the lesser
// values since they are less than the lower-bound (firstIP).
if bytes.Compare(*firstIPSpanning, firstIP) < 0 {
// Split on the previous IP of the first IP so that the right list of IPs
// of the partition includes the firstIP.
prevFirstRangeIP := getPreviousIP(firstIP)
var bitLen int
if prevFirstRangeIP.To4() != nil {
bitLen = ipv4BitLen
} else {
bitLen = ipv6BitLen
}
_, _, right := PartitionCIDR(spanningCIDR, net.IPNet{IP: prevFirstRangeIP, Mask: net.CIDRMask(bitLen, bitLen)})
// Append all CIDRs but the first, as this CIDR includes the upper
// bound of the spanning CIDR, which we still need to partition on.
cidrList = append(cidrList, right...)
spanningCIDR = *right[0]
cidrList = cidrList[1:]
}
// Conversely, if the last IP of the spanning CIDR exceeds the upper bound
// (lastIP), we need to split the spanning CIDR and only take the IPs that
// are less than the value which we split on, as we do not want the greater
// values since they exceed the upper bound (lastIP).
if bytes.Compare(*lastIPSpanning, lastIP) > 0 {
// Split on the next IP of the last IP so that the left list of IPs
// of the partition include the lastIP.
nextFirstRangeIP := getNextIP(lastIP)
var bitLen int
if nextFirstRangeIP.To4() != nil {
bitLen = ipv4BitLen
} else {
bitLen = ipv6BitLen
}
left, _, _ := PartitionCIDR(spanningCIDR, net.IPNet{IP: nextFirstRangeIP, Mask: net.CIDRMask(bitLen, bitLen)})
cidrList = append(cidrList, left...)
} else {
// Otherwise, there is no need to partition; just add the spanning
// CIDR to the list of networks.
cidrList = append(cidrList, &spanningCIDR)
}
return cidrList
}
// PartitionCIDR returns a list of IP Networks partitioned upon excludeCIDR.
// The first list contains the networks to the left of the excludeCIDR in the
// partition, the second is a list containing the excludeCIDR itself if it is
// contained within the targetCIDR (nil otherwise), and the
// third is a list containing the networks to the right of the excludeCIDR in
// the partition.
func PartitionCIDR(targetCIDR net.IPNet, excludeCIDR net.IPNet) ([]*net.IPNet, []*net.IPNet, []*net.IPNet) {
var targetIsIPv4 bool
if targetCIDR.IP.To4() != nil {
targetIsIPv4 = true
}
targetIPRange := ipNetToRange(targetCIDR)
excludeIPRange := ipNetToRange(excludeCIDR)
targetFirstIP := *targetIPRange.First
targetLastIP := *targetIPRange.Last
excludeFirstIP := *excludeIPRange.First
excludeLastIP := *excludeIPRange.Last
targetMaskSize, _ := targetCIDR.Mask.Size()
excludeMaskSize, _ := excludeCIDR.Mask.Size()
if bytes.Compare(excludeLastIP, targetFirstIP) < 0 {
return nil, nil, []*net.IPNet{&targetCIDR}
} else if bytes.Compare(targetLastIP, excludeFirstIP) < 0 {
return []*net.IPNet{&targetCIDR}, nil, nil
}
if targetMaskSize >= excludeMaskSize {
return nil, []*net.IPNet{&targetCIDR}, nil
}
left := []*net.IPNet{}
right := []*net.IPNet{}
newPrefixLen := targetMaskSize + 1
targetFirstCopy := make(net.IP, len(targetFirstIP))
copy(targetFirstCopy, targetFirstIP)
iLowerOld := make(net.IP, len(targetFirstCopy))
copy(iLowerOld, targetFirstCopy)
// Since golang only supports up to unsigned 64-bit integers, and we need
// to perform addition on addresses, use math/big library, which allows
// for manipulation of large integers.
// Used to track the current lower and upper bounds of the ranges to compare
// to excludeCIDR.
iLower := big.NewInt(0)
iUpper := big.NewInt(0)
iLower = iLower.SetBytes(targetFirstCopy)
var bitLen int
if targetIsIPv4 {
bitLen = ipv4BitLen
} else {
bitLen = ipv6BitLen
}
shiftAmount := (uint)(bitLen - newPrefixLen)
targetIPInt := big.NewInt(0)
targetIPInt.SetBytes(targetFirstIP.To16())
exp := big.NewInt(0)
// Use left shift for exponentiation
exp = exp.Lsh(big.NewInt(1), shiftAmount)
iUpper = iUpper.Add(targetIPInt, exp)
matched := big.NewInt(0)
for excludeMaskSize >= newPrefixLen {
// Append leading zeros to IPv4 addresses, as math.Big.Int does not
// append them when the IP address is copied from a byte array to
// math.Big.Int. Leading zeroes are required for parsing IPv4 addresses
// for use with net.IP / net.IPNet.
var iUpperBytes, iLowerBytes []byte
if targetIsIPv4 {
iUpperBytes = append(ipv4LeadingZeroes, iUpper.Bytes()...)
iLowerBytes = append(ipv4LeadingZeroes, iLower.Bytes()...)
} else {
iUpperBytesLen := len(iUpper.Bytes())
// Make sure that the number of bytes in the array matches what net
// package expects, as big package doesn't append leading zeroes.
if iUpperBytesLen != net.IPv6len {
numZeroesToAppend := net.IPv6len - iUpperBytesLen
zeroBytes := make([]byte, numZeroesToAppend)
iUpperBytes = append(zeroBytes, iUpper.Bytes()...)
} else {
iUpperBytes = iUpper.Bytes()
}
iLowerBytesLen := len(iLower.Bytes())
if iLowerBytesLen != net.IPv6len {
numZeroesToAppend := net.IPv6len - iLowerBytesLen
zeroBytes := make([]byte, numZeroesToAppend)
iLowerBytes = append(zeroBytes, iLower.Bytes()...)
} else {
iLowerBytes = iLower.Bytes()
}
}
// If the exclude CIDR starts at or above the upper half of the current
// candidate range, the lower half lies entirely to the left of the exclude
// CIDR, so add it to the left set and continue in the upper half.
if bytes.Compare(excludeFirstIP, iUpperBytes) >= 0 {
left = append(left, &net.IPNet{IP: iLowerBytes, Mask: net.CIDRMask(newPrefixLen, bitLen)})
matched = matched.Set(iUpper)
} else {
// Otherwise the exclude CIDR lies in the lower half: the upper half goes
// to the right set and we continue in the lower half.
right = append(right, &net.IPNet{IP: iUpperBytes, Mask: net.CIDRMask(newPrefixLen, bitLen)})
matched = matched.Set(iLower)
}
newPrefixLen++
if newPrefixLen > bitLen {
break
}
iLower = iLower.Set(matched)
iUpper = iUpper.Add(matched, big.NewInt(0).Lsh(big.NewInt(1), uint(bitLen-newPrefixLen)))
}
excludeList := []*net.IPNet{&excludeCIDR}
return left, excludeList, right
}
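// Illustrative behaviour sketch (added for this edit, not from the original source):
// excluding the upper /25 of a /24 leaves the lower /25 on the left side.
//
//	_, target, _ := net.ParseCIDR("192.0.2.0/24")
//	_, exclude, _ := net.ParseCIDR("192.0.2.128/25")
//	left, excluded, right := PartitionCIDR(*target, *exclude)
//	// left: [192.0.2.0/25], excluded: [192.0.2.128/25], right: []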
// KeepUniqueAddrs transforms the provided multiset of IP addresses into a
// single set, lexicographically sorted via comparison of the addresses using
// netip.Addr.Compare (i.e. IPv4 addresses show up before IPv6).
// The slice is manipulated in-place destructively; it does not create a new slice.
func KeepUniqueAddrs(addrs []netip.Addr) []netip.Addr {
SortAddrList(addrs)
return slices.Compact(addrs)
}
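// Illustrative usage sketch (added for this edit, not from the original source):
//
//	addrs := []netip.Addr{
//		netip.MustParseAddr("10.0.0.2"),
//		netip.MustParseAddr("10.0.0.1"),
//		netip.MustParseAddr("10.0.0.2"),
//	}
//	addrs = KeepUniqueAddrs(addrs)
//	// addrs: [10.0.0.1 10.0.0.2], sorted and with the duplicate removed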
var privateIPBlocks []*net.IPNet
func initPrivatePrefixes() {
// We only care about global scope prefixes here.
for _, cidr := range []string{
"0.0.0.0/8", // RFC1122 - IPv4 Host on this network
"10.0.0.0/8", // RFC1918 - IPv4 Private-Use Networks
"100.64.0.0/10", // RFC6598 - IPv4 Shared address space
"127.0.0.0/8", // RFC1122 - IPv4 Loopback
"169.254.0.0/16", // RFC3927 - IPv4 Link-Local
"172.16.0.0/12", // RFC1918 - IPv4 Private-Use Networks
"192.0.0.0/24", // RFC6890 - IPv4 IETF Assignments
"192.0.2.0/24", // RFC5737 - IPv4 TEST-NET-1
"192.168.0.0/16", // RFC1918 - IPv4 Private-Use Networks
"198.18.0.0/15", // RFC2544 - IPv4 Interconnect Benchmarks
"198.51.100.0/24", // RFC5737 - IPv4 TEST-NET-2
"203.0.113.0/24", // RFC5737 - IPv4 TEST-NET-3
"224.0.0.0/4", // RFC5771 - IPv4 Multicast
"::/128", // RFC4291 - IPv6 Unspecified
"::1/128", // RFC4291 - IPv6 Loopback
"100::/64", // RFC6666 - IPv6 Discard-Only Prefix
"2001:2::/48", // RFC5180 - IPv6 Benchmarking
"2001:db8::/48", // RFC3849 - IPv6 Documentation
"fc00::/7", // RFC4193 - IPv6 Unique-Local
"fe80::/10", // RFC4291 - IPv6 Link-Local
"ff00::/8", // RFC4291 - IPv6 Multicast
} {
_, block, _ := net.ParseCIDR(cidr)
privateIPBlocks = append(privateIPBlocks, block)
}
}
func init() {
initPrivatePrefixes()
}
// IsPublicAddr returns whether a given global IP is from
// a public range.
func IsPublicAddr(ip net.IP) bool {
for _, block := range privateIPBlocks {
if block.Contains(ip) {
return false
}
}
return true
}
// IPToPrefix returns the corresponding IPNet for the given IP.
func IPToPrefix(ip net.IP) *net.IPNet {
bits := net.IPv6len * 8
if ip.To4() != nil {
ip = ip.To4()
bits = net.IPv4len * 8
}
prefix := &net.IPNet{
IP: ip,
Mask: net.CIDRMask(bits, bits),
}
return prefix
}
// IsIPv4 returns true if the given IP is an IPv4 address.
func IsIPv4(ip net.IP) bool {
return ip.To4() != nil
}
// IsIPv6 returns true if the given IP is an IPv6 address.
func IsIPv6(ip net.IP) bool {
return ip != nil && ip.To4() == nil
}
// ListContainsIP returns whether a list of IPs contains a given IP.
func ListContainsIP(ipList []net.IP, ip net.IP) bool {
for _, e := range ipList {
if e.Equal(ip) {
return true
}
}
return false
}
// SortIPList sorts the provided net.IP slice in place.
func SortIPList(ipList []net.IP) {
slices.SortFunc(ipList, func(a, b net.IP) int { return bytes.Compare(a, b) })
}
func SortAddrList(ipList []netip.Addr) {
slices.SortFunc(ipList, netip.Addr.Compare)
}
// getSortedIPList returns a new net.IP slice in which the IPs are sorted.
func getSortedIPList(ipList []net.IP) []net.IP {
sortedIPList := make([]net.IP, len(ipList))
copy(sortedIPList, ipList)
SortIPList(sortedIPList)
return sortedIPList
}
// UnsortedIPListsAreEqual returns true if the two provided lists of net.IP contain
// the same addresses, without considering the order of the IPs in each list. The
// function first sorts copies of both lists and then compares them element-wise.
func UnsortedIPListsAreEqual(ipList1, ipList2 []net.IP) bool {
// The IP set is definitely different if the lengths are different.
if len(ipList1) != len(ipList2) {
return false
}
a := getSortedIPList(ipList1)
b := getSortedIPList(ipList2)
// Lengths are equal, so each member in one set must be in the other
// If any IPs at the same index differ the sorted IP list are not equal.
for i := range a {
if !a[i].Equal(b[i]) {
return false
}
}
return true
}
// GetIPFromListByFamily returns a single IP address of the provided family from a list
// of ip addresses.
func GetIPFromListByFamily(ipList []net.IP, v4Family bool) net.IP {
for _, ipAddr := range ipList {
if v4Family == IsIPv4(ipAddr) || (!v4Family && IsIPv6(ipAddr)) {
return ipAddr
}
}
return nil
}
// MustAddrsFromIPs converts a slice of net.IP to a slice of netip.Addr. It assumes
// the input slice contains only valid IP addresses and always returns a slice
// containing valid netip.Addr.
func MustAddrsFromIPs(ips []net.IP) []netip.Addr {
addrs := make([]netip.Addr, 0, len(ips))
for _, ip := range ips {
addrs = append(addrs, netipx.MustFromStdIP(ip))
}
return addrs
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package ip
import (
"strconv"
"github.com/vishvananda/netlink"
)
func ParseScope(scope string) (int, error) {
switch scope {
case "global":
return int(netlink.SCOPE_UNIVERSE), nil
case "nowhere":
return int(netlink.SCOPE_NOWHERE), nil
case "host":
return int(netlink.SCOPE_HOST), nil
case "link":
return int(netlink.SCOPE_LINK), nil
case "site":
return int(netlink.SCOPE_SITE), nil
default:
return strconv.Atoi(scope)
}
}
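// Illustrative behaviour sketch (added for this edit, not from the original source):
//
//	ParseScope("link")   // int(netlink.SCOPE_LINK), nil
//	ParseScope("global") // int(netlink.SCOPE_UNIVERSE), nil
//	ParseScope("200")    // 200, nil (unknown names fall back to strconv.Atoi)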
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sCiliumUtils "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils"
"github.com/cilium/cilium/pkg/policy/api"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +deepequal-gen:private-method=true
// +kubebuilder:resource:categories={cilium,ciliumpolicy},singular="ciliumclusterwidenetworkpolicy",path="ciliumclusterwidenetworkpolicies",scope="Cluster",shortName={ccnp}
// +kubebuilder:printcolumn:JSONPath=".status.conditions[?(@.type=='Valid')].status",name="Valid",type=string
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// CiliumClusterwideNetworkPolicy is a Kubernetes third-party resource with a
// modified version of CiliumNetworkPolicy that is cluster scoped rather than
// namespace scoped.
type CiliumClusterwideNetworkPolicy struct {
// +deepequal-gen=false
metav1.TypeMeta `json:",inline"`
// +deepequal-gen=false
metav1.ObjectMeta `json:"metadata"`
// Spec is the desired Cilium specific rule specification.
Spec *api.Rule `json:"spec,omitempty"`
// Specs is a list of desired Cilium specific rule specifications.
Specs api.Rules `json:"specs,omitempty"`
// Status is the status of the Cilium policy rule.
//
// The reason this field exists in this structure is due to a bug in the k8s
// code-generator that doesn't create a `UpdateStatus` method because the
// field does not exist in the structure.
//
// +kubebuilder:validation:Optional
Status CiliumNetworkPolicyStatus `json:"status"`
}
// DeepEqual compares 2 CCNPs while ignoring the LastAppliedConfigAnnotation
// and ignoring the Status field of the CCNP.
func (in *CiliumClusterwideNetworkPolicy) DeepEqual(other *CiliumClusterwideNetworkPolicy) bool {
return objectMetaDeepEqual(in.ObjectMeta, other.ObjectMeta) && in.deepEqual(other)
}
// SetDerivedPolicyStatus set the derivative policy status for the given
// derivative policy name.
func (r *CiliumClusterwideNetworkPolicy) SetDerivedPolicyStatus(derivativePolicyName string, status CiliumNetworkPolicyNodeStatus) {
if r.Status.DerivativePolicies == nil {
r.Status.DerivativePolicies = map[string]CiliumNetworkPolicyNodeStatus{}
}
r.Status.DerivativePolicies[derivativePolicyName] = status
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=false
// +deepequal-gen=false
// CiliumClusterwideNetworkPolicyList is a list of
// CiliumClusterwideNetworkPolicy objects.
type CiliumClusterwideNetworkPolicyList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
// Items is a list of CiliumClusterwideNetworkPolicies.
Items []CiliumClusterwideNetworkPolicy `json:"items"`
}
// Parse parses a CiliumClusterwideNetworkPolicy and returns a list of cilium
// policy rules.
func (r *CiliumClusterwideNetworkPolicy) Parse() (api.Rules, error) {
if r.ObjectMeta.Name == "" {
return nil, NewErrParse("CiliumClusterwideNetworkPolicy must have name")
}
name := r.ObjectMeta.Name
uid := r.ObjectMeta.UID
retRules := api.Rules{}
if r.Spec == nil && r.Specs == nil {
return nil, ErrEmptyCCNP
}
if r.Spec != nil {
if err := r.Spec.Sanitize(); err != nil {
return nil, NewErrParse(fmt.Sprintf("Invalid CiliumClusterwideNetworkPolicy spec: %s", err))
}
cr := k8sCiliumUtils.ParseToCiliumRule("", name, uid, r.Spec)
retRules = append(retRules, cr)
}
if r.Specs != nil {
for _, rule := range r.Specs {
if err := rule.Sanitize(); err != nil {
return nil, NewErrParse(fmt.Sprintf("Invalid CiliumClusterwideNetworkPolicy specs: %s", err))
}
cr := k8sCiliumUtils.ParseToCiliumRule("", name, uid, rule)
retRules = append(retRules, cr)
}
}
return retRules, nil
}
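// Illustrative sketch (not part of the original source): a caller of Parse has
// to handle both an empty policy (neither Spec nor Specs set) and sanitization
// failures, all of which are reported as ErrParse values.
func exampleParseCCNP(ccnp *CiliumClusterwideNetworkPolicy) (api.Rules, error) {
	rules, err := ccnp.Parse()
	if err != nil {
		// ErrEmptyCCNP is returned when neither Spec nor Specs is set;
		// sanitization errors carry the underlying failure message.
		return nil, err
	}
	return rules, nil
}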
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
import (
"bytes"
"encoding/json"
"fmt"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
"github.com/cilium/cilium/pkg/loadbalancer"
"github.com/cilium/cilium/pkg/option"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:categories={cilium},singular="ciliumenvoyconfig",path="ciliumenvoyconfigs",scope="Namespaced",shortName={cec}
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",description="The age of the resource",name="Age",type=date
// +kubebuilder:storageversion
type CiliumEnvoyConfig struct {
// +k8s:openapi-gen=false
// +deepequal-gen=false
metav1.TypeMeta `json:",inline"`
// +k8s:openapi-gen=false
// +deepequal-gen=false
metav1.ObjectMeta `json:"metadata"`
// +k8s:openapi-gen=false
Spec CiliumEnvoyConfigSpec `json:"spec,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +deepequal-gen=false
// CiliumEnvoyConfigList is a list of CiliumEnvoyConfig objects.
type CiliumEnvoyConfigList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
// Items is a list of CiliumEnvoyConfig.
Items []CiliumEnvoyConfig `json:"items"`
}
type CiliumEnvoyConfigSpec struct {
// Services specifies Kubernetes services for which traffic is
// forwarded to an Envoy listener for L7 load balancing. Backends
// of these services are automatically synced to Envoy using EDS.
//
// +kubebuilder:validation:Optional
Services []*ServiceListener `json:"services,omitempty"`
// BackendServices specifies Kubernetes services whose backends
// are automatically synced to Envoy using EDS. Traffic for these
// services is not forwarded to an Envoy listener. This allows an
// Envoy listener to load balance traffic to these backends while
// normal Cilium service load balancing continues to balance
// traffic for these services at the same time.
//
// +kubebuilder:validation:Optional
BackendServices []*Service `json:"backendServices,omitempty"`
// Envoy xDS resources, a list of the following Envoy resource types:
// type.googleapis.com/envoy.config.listener.v3.Listener,
// type.googleapis.com/envoy.config.route.v3.RouteConfiguration,
// type.googleapis.com/envoy.config.cluster.v3.Cluster,
// type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment, and
// type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret.
//
// +kubebuilder:validation:Required
Resources []XDSResource `json:"resources,omitempty"`
// NodeSelector is a label selector that determines to which nodes
// this configuration applies.
// If nil, then this config applies to all nodes.
//
// +kubebuilder:validation:Optional
NodeSelector *slim_metav1.LabelSelector `json:"nodeSelector,omitempty"`
}
type Service struct {
// Name is the name of a destination Kubernetes service that identifies traffic
// to be redirected.
//
// +kubebuilder:validation:Required
Name string `json:"name"`
// Namespace is the Kubernetes service namespace.
// In CiliumEnvoyConfig, this defaults to the namespace of the CEC;
// in CiliumClusterwideEnvoyConfig, it defaults to "default".
// +kubebuilder:validation:Optional
Namespace string `json:"namespace"`
// Ports is a set of port numbers that can be used for filtering when the
// underlying service exposes multiple port numbers.
//
// +kubebuilder:validation:Optional
Ports []string `json:"number,omitempty"`
}
func (l *Service) ServiceName() loadbalancer.ServiceName {
return loadbalancer.ServiceName{
Namespace: l.Namespace,
Name: l.Name,
}
}
type ServiceListener struct {
// Name is the name of a destination Kubernetes service that identifies traffic
// to be redirected.
//
// +kubebuilder:validation:Required
Name string `json:"name"`
// Namespace is the Kubernetes service namespace.
// In CiliumEnvoyConfig, this is overridden to the namespace of the CEC;
// in CiliumClusterwideEnvoyConfig, it defaults to "default".
// +kubebuilder:validation:Optional
Namespace string `json:"namespace"`
// Ports is a set of the service's frontend ports that should be redirected
// to the Envoy listener. By default, all frontend ports of the service are
// redirected.
//
// +kubebuilder:validation:Optional
Ports []uint16 `json:"ports,omitempty"`
// Listener specifies the name of the Envoy listener the
// service traffic is redirected to. The listener must be
// specified in the Envoy 'resources' of the same
// CiliumEnvoyConfig.
//
// If omitted, the first listener specified in 'resources' is
// used.
//
// +kubebuilder:validation:Optional
Listener string `json:"listener"`
}
func (l *ServiceListener) ServiceName() loadbalancer.ServiceName {
return loadbalancer.ServiceName{
Namespace: l.Namespace,
Name: l.Name,
}
}
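// Illustrative sketch (not part of the original source; the service and
// listener names are made up): a minimal CiliumEnvoyConfigSpec that redirects
// frontend port 80 of service "my-svc" in namespace "default" to the Envoy
// listener named "my-listener", which must be defined in Resources.
func exampleCECSpec(listener XDSResource) CiliumEnvoyConfigSpec {
	return CiliumEnvoyConfigSpec{
		Services: []*ServiceListener{{
			Name:      "my-svc",
			Namespace: "default",
			Ports:     []uint16{80},
			Listener:  "my-listener",
		}},
		Resources: []XDSResource{listener},
	}
}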
// +kubebuilder:pruning:PreserveUnknownFields
type XDSResource struct {
*anypb.Any `json:"-"`
}
// DeepCopyInto deep copies 'in' into 'out'.
func (in *XDSResource) DeepCopyInto(out *XDSResource) {
out.Any, _ = proto.Clone(in.Any).(*anypb.Any)
}
// DeepEqual returns 'true' if 'a' and 'b' are equal.
func (a *XDSResource) DeepEqual(b *XDSResource) bool {
return proto.Equal(a.Any, b.Any)
}
// MarshalJSON ensures that the unstructured object produces proper
// JSON when passed to Go's standard JSON library.
func (u *XDSResource) MarshalJSON() ([]byte, error) {
return protojson.Marshal(u.Any)
}
// UnmarshalJSON ensures that the unstructured object properly decodes
// JSON when passed to Go's standard JSON library.
func (u *XDSResource) UnmarshalJSON(b []byte) (err error) {
// xDS resources are not validated in K8s, recover from possible panics
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("CEC JSON decoding paniced: %v", r)
}
}()
u.Any = &anypb.Any{}
err = protojson.Unmarshal(b, u.Any)
if err != nil {
var buf bytes.Buffer
json.Indent(&buf, b, "", "\t")
log.Warningf("Ignoring invalid CiliumEnvoyConfig JSON (%s): %s",
err, buf.String())
} else if option.Config.Debug {
log.Debugf("CEC unmarshaled XDS Resource: %v", prototext.Format(u.Any))
}
return nil
}
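// Illustrative sketch (not part of the original source): because XDSResource
// embeds anypb.Any and uses protojson, each resource in a CiliumEnvoyConfig
// carries an "@type" field identifying the Envoy resource type. As shown by
// UnmarshalJSON above, an invalid resource is logged and ignored rather than
// failing decoding of the whole object.
func exampleDecodeXDSResource(raw []byte) *XDSResource {
	res := &XDSResource{}
	if err := res.UnmarshalJSON(raw); err != nil {
		// Only a panic during decoding produces a non-nil error here.
		return nil
	}
	return res
}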
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
import (
"fmt"
"strconv"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/cilium/cilium/pkg/iana"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
lb "github.com/cilium/cilium/pkg/loadbalancer"
"github.com/cilium/cilium/pkg/policy/api"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:categories={cilium,ciliumpolicy},singular="ciliumlocalredirectpolicy",path="ciliumlocalredirectpolicies",scope="Namespaced",shortName={clrp}
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date
// CiliumLocalRedirectPolicy is a Kubernetes Custom Resource that contains a
// specification to redirect traffic locally within a node.
type CiliumLocalRedirectPolicy struct {
// +k8s:openapi-gen=false
// +deepequal-gen=false
metav1.TypeMeta `json:",inline"`
// +k8s:openapi-gen=false
// +deepequal-gen=false
metav1.ObjectMeta `json:"metadata"`
// Spec is the desired behavior of the local redirect policy.
Spec CiliumLocalRedirectPolicySpec `json:"spec,omitempty"`
// Status is the most recent status of the local redirect policy.
// It is a read-only field.
//
// +deepequal-gen=false
// +kubebuilder:validation:Optional
Status CiliumLocalRedirectPolicyStatus `json:"status"`
}
type Frontend struct {
// IP is a destination ip address for traffic to be redirected.
//
// Example:
// When it is set to "169.254.169.254", traffic destined to
// "169.254.169.254" is redirected.
//
// +kubebuilder:validation:Pattern=`((^\s*((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))\s*$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$))`
// +kubebuilder:validation:Required
IP string `json:"ip"`
// ToPorts is a list of destination L4 ports with protocol for traffic
// to be redirected.
// When multiple ports are specified, the ports must be named.
//
// Example:
// When set to Port: "53" and Protocol: UDP, traffic destined to port '53'
// with UDP protocol is redirected.
//
// +kubebuilder:validation:Required
ToPorts []PortInfo `json:"toPorts"`
}
// RedirectFrontend is a frontend configuration that matches traffic that needs to be redirected.
// The configuration must be specified using an IP/port tuple or a Kubernetes service.
type RedirectFrontend struct {
// AddressMatcher is a tuple {IP, port, protocol} that matches traffic to be
// redirected.
//
// +kubebuilder:validation:OneOf
AddressMatcher *Frontend `json:"addressMatcher,omitempty"`
// ServiceMatcher specifies Kubernetes service and port that matches
// traffic to be redirected.
//
// +kubebuilder:validation:OneOf
ServiceMatcher *ServiceInfo `json:"serviceMatcher,omitempty"`
}
// PortInfo specifies L4 port number and name along with the transport protocol
type PortInfo struct {
// Port is an L4 port number. The string will be strictly parsed as a single uint16.
//
// +kubebuilder:validation:Pattern=`^()([1-9]|[1-5]?[0-9]{2,4}|6[1-4][0-9]{3}|65[1-4][0-9]{2}|655[1-2][0-9]|6553[1-5])$`
// +kubebuilder:validation:Required
Port string `json:"port"`
// Protocol is the L4 protocol.
// Accepted values: "TCP", "UDP"
//
// +kubebuilder:validation:Enum=TCP;UDP
// +kubebuilder:validation:Required
Protocol api.L4Proto `json:"protocol"`
// Name is a port name, which must contain at least one [a-z],
// and may also contain [0-9] and '-' anywhere except adjacent to another
// '-' or at the beginning or the end.
//
// +kubebuilder:validation:Pattern=`^([0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$`
// +kubebuilder:validation:Optional
Name string `json:"name"`
}
type ServiceInfo struct {
// Name is the name of a destination Kubernetes service that identifies traffic
// to be redirected.
// The service type needs to be ClusterIP.
//
// Example:
// When this field is populated with 'serviceName:myService', all the traffic
// destined to the cluster IP of this service at the (specified)
// service port(s) will be redirected.
//
// +kubebuilder:validation:Required
Name string `json:"serviceName"`
// Namespace is the Kubernetes service namespace.
// The service namespace must match the namespace of the parent Local
// Redirect Policy. For Cluster-wide Local Redirect Policy, this
// can be any namespace.
// +kubebuilder:validation:Required
Namespace string `json:"namespace"`
// ToPorts is a list of destination service L4 ports with protocol for
// traffic to be redirected. If not specified, traffic for all the service
// ports will be redirected.
// When multiple ports are specified, the ports must be named.
//
// +kubebuilder:validation:Optional
ToPorts []PortInfo `json:"toPorts,omitempty"`
}
// RedirectBackend is a backend configuration that determines where traffic needs to be redirected to.
type RedirectBackend struct {
// LocalEndpointSelector selects node local pod(s) where traffic is redirected to.
//
// +kubebuilder:validation:Required
LocalEndpointSelector slim_metav1.LabelSelector `json:"localEndpointSelector"`
// ToPorts is a list of L4 ports with protocol of node local pod(s) where traffic
// is redirected to.
// When multiple ports are specified, the ports must be named.
//
// +kubebuilder:validation:Required
ToPorts []PortInfo `json:"toPorts"`
}
// CiliumLocalRedirectPolicySpec specifies the configurations for redirecting traffic
// within a node.
type CiliumLocalRedirectPolicySpec struct {
// RedirectFrontend specifies frontend configuration to redirect traffic from.
// It can not be empty.
//
// +kubebuilder:validation:Required
// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="redirectFrontend is immutable"
RedirectFrontend RedirectFrontend `json:"redirectFrontend"`
// RedirectBackend specifies backend configuration to redirect traffic to.
// It can not be empty.
//
// +kubebuilder:validation:Required
// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="redirectBackend is immutable"
RedirectBackend RedirectBackend `json:"redirectBackend"`
// SkipRedirectFromBackend indicates whether traffic matching RedirectFrontend
// from RedirectBackend should skip redirection, in which case the traffic is
// forwarded as-is.
//
// The default is false, which means traffic matching RedirectFrontend will
// get redirected from all pods, including the RedirectBackend(s).
//
// Example: If RedirectFrontend is configured as "169.254.169.254:80" for the
// traffic to be redirected to backends selected by RedirectBackend, and
// SkipRedirectFromBackend is set to true, traffic going to "169.254.169.254:80"
// from those backends will not be redirected back to the backends. Instead,
// the matched traffic from the backends will be forwarded to the original
// destination "169.254.169.254:80".
//
// +kubebuilder:validation:Optional
// +kubebuilder:default=false
// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="skipRedirectFromBackend is immutable"
SkipRedirectFromBackend bool `json:"skipRedirectFromBackend"`
// Description can be used by the creator of the policy to describe the
// purpose of this policy.
//
// +kubebuilder:validation:Optional
Description string `json:"description,omitempty"`
}
// CiliumLocalRedirectPolicyStatus is the status of a Local Redirect Policy.
type CiliumLocalRedirectPolicyStatus struct {
// TODO Define status(aditi)
OK bool `json:"ok,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=false
// +deepequal-gen=false
// CiliumLocalRedirectPolicyList is a list of CiliumLocalRedirectPolicy objects.
type CiliumLocalRedirectPolicyList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
// Items is a list of CiliumLocalRedirectPolicy
Items []CiliumLocalRedirectPolicy `json:"items"`
}
// SanitizePortInfo sanitizes all the fields in the PortInfo.
// It returns the port number, name, and protocol derived from the given input,
// along with an error for failure cases.
func (pInfo *PortInfo) SanitizePortInfo(checkNamedPort bool) (uint16, string, lb.L4Type, error) {
var (
pInt uint16
pName string
protocol lb.L4Type
)
// Sanitize port
if pInfo.Port == "" {
return pInt, pName, protocol, fmt.Errorf("port must be specified")
} else {
p, err := strconv.ParseUint(pInfo.Port, 0, 16)
if err != nil {
return pInt, pName, protocol, fmt.Errorf("unable to parse port: %w", err)
}
if p == 0 {
return pInt, pName, protocol, fmt.Errorf("port cannot be 0")
}
pInt = uint16(p)
}
// Sanitize name
if checkNamedPort {
if pInfo.Name == "" {
return pInt, pName, protocol, fmt.Errorf("port %s in the local "+
"redirect policy spec must have a valid IANA_SVC_NAME, as there are multiple ports", pInfo.Port)
}
if !iana.IsSvcName(pInfo.Name) {
return pInt, pName, protocol, fmt.Errorf("port name %s isn't a "+
"valid IANA_SVC_NAME", pInfo.Name)
}
}
pName = strings.ToLower(pInfo.Name) // Normalize for case insensitive comparison
// Sanitize protocol
var err error
protocol, err = lb.NewL4Type(string(pInfo.Protocol))
if err != nil {
return pInt, pName, protocol, err
}
return pInt, pName, protocol, nil
}
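// Illustrative sketch (not part of the original source): when a frontend or
// backend lists multiple ports, checkNamedPort is set so that every PortInfo
// must carry a valid IANA service name.
func exampleSanitizePortInfo() {
	p := PortInfo{Port: "53", Protocol: api.L4Proto("UDP"), Name: "dns"}
	port, name, proto, err := p.SanitizePortInfo(true)
	_, _, _, _ = port, name, proto, err // 53, "dns", UDP, nil
}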
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
import (
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/cilium/cilium/pkg/comparator"
k8sCiliumUtils "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils"
slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
k8sUtils "github.com/cilium/cilium/pkg/k8s/utils"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/policy/api"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +deepequal-gen:private-method=true
// +kubebuilder:resource:categories={cilium,ciliumpolicy},singular="ciliumnetworkpolicy",path="ciliumnetworkpolicies",scope="Namespaced",shortName={cnp,ciliumnp}
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date
// +kubebuilder:printcolumn:JSONPath=".status.conditions[?(@.type=='Valid')].status",name="Valid",type=string
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// CiliumNetworkPolicy is a Kubernetes third-party resource with an extended
// version of NetworkPolicy.
type CiliumNetworkPolicy struct {
// +deepequal-gen=false
metav1.TypeMeta `json:",inline"`
// +deepequal-gen=false
metav1.ObjectMeta `json:"metadata"`
// Spec is the desired Cilium specific rule specification.
Spec *api.Rule `json:"spec,omitempty"`
// Specs is a list of desired Cilium specific rule specification.
Specs api.Rules `json:"specs,omitempty"`
// Status is the status of the Cilium policy rule
//
// +deepequal-gen=false
// +kubebuilder:validation:Optional
Status CiliumNetworkPolicyStatus `json:"status"`
}
// DeepEqual compares 2 CNPs.
func (in *CiliumNetworkPolicy) DeepEqual(other *CiliumNetworkPolicy) bool {
return objectMetaDeepEqual(in.ObjectMeta, other.ObjectMeta) && in.deepEqual(other)
}
// objectMetaDeepEqual performs an equality check for metav1.ObjectMeta that
// ignores the LastAppliedConfigAnnotation. This function's usage is shared
// among CNP and CCNP as they have the same structure.
func objectMetaDeepEqual(in, other metav1.ObjectMeta) bool {
if !(in.Name == other.Name && in.Namespace == other.Namespace) {
return false
}
return comparator.MapStringEqualsIgnoreKeys(
in.GetAnnotations(),
other.GetAnnotations(),
// Ignore v1.LastAppliedConfigAnnotation annotation
[]string{v1.LastAppliedConfigAnnotation})
}
// +deepequal-gen=true
// CiliumNetworkPolicyStatus is the status of a Cilium policy rule.
type CiliumNetworkPolicyStatus struct {
// DerivativePolicies is the status of all policies derived from the Cilium
// policy
DerivativePolicies map[string]CiliumNetworkPolicyNodeStatus `json:"derivativePolicies,omitempty"`
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
// +listType=map
// +listMapKey=type
Conditions []NetworkPolicyCondition `json:"conditions,omitempty"`
}
// +deepequal-gen=true
// CiliumNetworkPolicyNodeStatus is the status of a Cilium policy rule for a
// specific node.
type CiliumNetworkPolicyNodeStatus struct {
// OK is true when the policy has been parsed and imported successfully
// into the in-memory policy repository on the node.
OK bool `json:"ok,omitempty"`
// Error describes any error that occurred when parsing or importing the
// policy, or realizing the policy for the endpoints to which it applies
// on the node.
Error string `json:"error,omitempty"`
// LastUpdated contains the last time this status was updated
LastUpdated slimv1.Time `json:"lastUpdated,omitempty"`
// Revision is the policy revision of the repository which first implemented
// this policy.
Revision uint64 `json:"localPolicyRevision,omitempty"`
// Enforcing is set to true once all endpoints present at the time the
// policy has been imported are enforcing this policy.
Enforcing bool `json:"enforcing,omitempty"`
// Annotations corresponds to the annotations in the ObjectMeta of the CNP
// that have been realized on the node. That is, if a user assigns
// annotation X=Y to an imported CNP, Annotations in
// CiliumNetworkPolicyNodeStatus will report X=Y once the CNP carrying that
// annotation has been realized on the node.
Annotations map[string]string `json:"annotations,omitempty"`
}
// CreateCNPNodeStatus returns a CiliumNetworkPolicyNodeStatus created from the
// provided fields.
func CreateCNPNodeStatus(enforcing, ok bool, cnpError error, rev uint64, annotations map[string]string) CiliumNetworkPolicyNodeStatus {
cnpns := CiliumNetworkPolicyNodeStatus{
Enforcing: enforcing,
Revision: rev,
OK: ok,
LastUpdated: slimv1.Now(),
Annotations: annotations,
}
if cnpError != nil {
cnpns.Error = cnpError.Error()
}
return cnpns
}
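// Illustrative sketch (not part of the original source; the derivative policy
// name is made up): a typical status update records whether the policy was
// imported and is enforced, together with the annotations seen at import time.
func exampleRecordDerivedStatus(cnp *CiliumNetworkPolicy, importErr error, rev uint64) {
	status := CreateCNPNodeStatus(importErr == nil, importErr == nil, importErr, rev,
		cnp.ObjectMeta.Annotations)
	cnp.SetDerivedPolicyStatus("example-derived-policy", status)
}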
func (r *CiliumNetworkPolicy) String() string {
result := ""
result += fmt.Sprintf("TypeMeta: %s, ", r.TypeMeta.String())
result += fmt.Sprintf("ObjectMeta: %s, ", r.ObjectMeta.String())
if r.Spec != nil {
result += fmt.Sprintf("Spec: %v", *(r.Spec))
}
if r.Specs != nil {
result += fmt.Sprintf("Specs: %v", r.Specs)
}
result += fmt.Sprintf("Status: %v", r.Status)
return result
}
// SetDerivedPolicyStatus set the derivative policy status for the given
// derivative policy name.
func (r *CiliumNetworkPolicy) SetDerivedPolicyStatus(derivativePolicyName string, status CiliumNetworkPolicyNodeStatus) {
if r.Status.DerivativePolicies == nil {
r.Status.DerivativePolicies = map[string]CiliumNetworkPolicyNodeStatus{}
}
r.Status.DerivativePolicies[derivativePolicyName] = status
}
// Parse parses a CiliumNetworkPolicy and returns a list of cilium policy
// rules.
func (r *CiliumNetworkPolicy) Parse() (api.Rules, error) {
if r.ObjectMeta.Name == "" {
return nil, NewErrParse("CiliumNetworkPolicy must have name")
}
namespace := k8sUtils.ExtractNamespace(&r.ObjectMeta)
// Temporary fix for CCNPs. See #12834.
// TL;DR. CCNPs are converted into SlimCNPs and end up here so we need to
// convert them back to CCNPs to allow proper parsing.
if namespace == "" {
ccnp := CiliumClusterwideNetworkPolicy{
TypeMeta: r.TypeMeta,
ObjectMeta: r.ObjectMeta,
Spec: r.Spec,
Specs: r.Specs,
Status: r.Status,
}
return ccnp.Parse()
}
name := r.ObjectMeta.Name
uid := r.ObjectMeta.UID
retRules := api.Rules{}
if r.Spec == nil && r.Specs == nil {
return nil, ErrEmptyCNP
}
if r.Spec != nil {
if err := r.Spec.Sanitize(); err != nil {
return nil, NewErrParse(fmt.Sprintf("Invalid CiliumNetworkPolicy spec: %s", err))
}
if r.Spec.NodeSelector.LabelSelector != nil {
return nil, NewErrParse("Invalid CiliumNetworkPolicy spec: rule cannot have NodeSelector")
}
cr := k8sCiliumUtils.ParseToCiliumRule(namespace, name, uid, r.Spec)
retRules = append(retRules, cr)
}
if r.Specs != nil {
for _, rule := range r.Specs {
if err := rule.Sanitize(); err != nil {
return nil, NewErrParse(fmt.Sprintf("Invalid CiliumNetworkPolicy specs: %s", err))
}
cr := k8sCiliumUtils.ParseToCiliumRule(namespace, name, uid, rule)
retRules = append(retRules, cr)
}
}
return retRules, nil
}
// GetIdentityLabels returns all rule labels in the CiliumNetworkPolicy.
func (r *CiliumNetworkPolicy) GetIdentityLabels() labels.LabelArray {
namespace := k8sUtils.ExtractNamespace(&r.ObjectMeta)
name := r.ObjectMeta.Name
uid := r.ObjectMeta.UID
// Even though the struct represents CiliumNetworkPolicy, we use it both for
// CiliumNetworkPolicy and CiliumClusterwideNetworkPolicy, so here we check for namespace
// to send correct derivedFrom label to get the correct policy labels.
derivedFrom := k8sCiliumUtils.ResourceTypeCiliumNetworkPolicy
if namespace == "" {
derivedFrom = k8sCiliumUtils.ResourceTypeCiliumClusterwideNetworkPolicy
}
return k8sCiliumUtils.GetPolicyLabels(namespace, name, uid, derivedFrom)
}
// RequiresDerivative returns true if the CNP has any rule that will create a new
// derivative rule.
func (r *CiliumNetworkPolicy) RequiresDerivative() bool {
if r.Spec != nil {
if r.Spec.RequiresDerivative() {
return true
}
}
if r.Specs != nil {
for _, rule := range r.Specs {
if rule.RequiresDerivative() {
return true
}
}
}
return false
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=false
// +deepequal-gen=false
// CiliumNetworkPolicyList is a list of CiliumNetworkPolicy objects.
type CiliumNetworkPolicyList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
// Items is a list of CiliumNetworkPolicy
Items []CiliumNetworkPolicy `json:"items"`
}
type PolicyConditionType string
const (
PolicyConditionValid PolicyConditionType = "Valid"
)
type NetworkPolicyCondition struct {
// The type of the policy condition
Type PolicyConditionType `json:"type"`
// The status of the condition, one of True, False, or Unknown
Status v1.ConditionStatus `json:"status"`
// The last time the condition transitioned from one status to another.
// +optional
LastTransitionTime slimv1.Time `json:"lastTransitionTime,omitempty"`
// The reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty"`
// A human readable message indicating details about the transition.
// +optional
Message string `json:"message,omitempty"`
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
var (
// ErrEmptyCNP is an error representing a CNP that is empty, which means it is
// missing both a `spec` and `specs` (both are nil).
ErrEmptyCNP = NewErrParse("Invalid CiliumNetworkPolicy spec(s): empty policy")
// ErrEmptyCCNP is an error representing a CCNP that is empty, which means it is
// missing both a `spec` and `specs` (both are nil).
ErrEmptyCCNP = NewErrParse("Invalid CiliumClusterwideNetworkPolicy spec(s): empty policy")
// ParsingErr is for comparison when checking error types.
ParsingErr = NewErrParse("")
)
// ErrParse is an error to describe where a policy fails to parse due to any
// invalid rule.
//
// +k8s:deepcopy-gen=false
// +deepequal-gen=false
type ErrParse struct {
msg string
}
// NewErrParse returns a new ErrParse.
func NewErrParse(msg string) ErrParse {
return ErrParse{
msg: msg,
}
}
// Error returns the error message for parsing
func (e ErrParse) Error() string {
return e.msg
}
// Is returns true if the given error is of type 'ErrParse'.
func (_ ErrParse) Is(e error) bool {
_, ok := e.(ErrParse)
return ok
}
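// Illustrative sketch (not part of the original source, assumes the standard
// library "errors" package is imported by the caller): because ErrParse
// implements Is, any parse failure can be matched against the ParsingErr
// sentinel regardless of its message.
//
//	if _, err := cnp.Parse(); errors.Is(err, ParsingErr) {
//		// invalid or empty policy
//	}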
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
k8sconst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
)
const (
// CustomResourceDefinitionGroup is the name of the third party resource group
CustomResourceDefinitionGroup = k8sconst.CustomResourceDefinitionGroup
// CustomResourceDefinitionVersion is the current version of the resource
CustomResourceDefinitionVersion = "v2"
// Cilium Network Policy (CNP)
// CNPPluralName is the plural name of Cilium Network Policy
CNPPluralName = "ciliumnetworkpolicies"
// CNPKindDefinition is the kind name for Cilium Network Policy
CNPKindDefinition = "CiliumNetworkPolicy"
// CNPName is the full name of Cilium Network Policy
CNPName = CNPPluralName + "." + CustomResourceDefinitionGroup
// Cilium Cluster wide Network Policy (CCNP)
// CCNPPluralName is the plural name of Cilium Cluster wide Network Policy
CCNPPluralName = "ciliumclusterwidenetworkpolicies"
// CCNPKindDefinition is the kind name for Cilium Cluster wide Network Policy
CCNPKindDefinition = "CiliumClusterwideNetworkPolicy"
// CCNPName is the full name of Cilium Cluster wide Network Policy
CCNPName = CCNPPluralName + "." + CustomResourceDefinitionGroup
// Cilium Egress Gateway Policy (CEGP)
// CEGPPluralName is the plural name of Cilium Egress Gateway Policy
CEGPPluralName = "ciliumegressgatewaypolicies"
// CEGPKindDefinition is the kind name of Cilium Egress Gateway Policy
CEGPKindDefinition = "CiliumEgressGatewayPolicy"
// CEGPName is the full name of Cilium Egress Gateway Policy
CEGPName = CEGPPluralName + "." + CustomResourceDefinitionGroup
// Cilium Endpoint (CEP)
// CEPPluralName is the plural name of Cilium Endpoint
CEPPluralName = "ciliumendpoints"
// CEPKindDefinition is the kind name for Cilium Endpoint
CEPKindDefinition = "CiliumEndpoint"
// CEPName is the full name of Cilium Endpoint
CEPName = CEPPluralName + "." + CustomResourceDefinitionGroup
// Cilium Node (CN)
// CNPluralName is the plural name of Cilium Node
CNPluralName = "ciliumnodes"
// CNKindDefinition is the kind name for Cilium Node
CNKindDefinition = "CiliumNode"
// CNName is the full name of Cilium Node
CNName = CNPluralName + "." + CustomResourceDefinitionGroup
// Cilium Identity
// CIDPluralName is the plural name of Cilium Identity
CIDPluralName = "ciliumidentities"
// CIDKindDefinition is the kind name for Cilium Identity
CIDKindDefinition = "CiliumIdentity"
// CIDName is the full name of Cilium Identity
CIDName = CIDPluralName + "." + CustomResourceDefinitionGroup
// Cilium Local Redirect Policy (CLRP)
// CLRPPluralName is the plural name of Local Redirect Policy
CLRPPluralName = "ciliumlocalredirectpolicies"
// CLRPKindDefinition is the kind name for Local Redirect Policy
CLRPKindDefinition = "CiliumLocalRedirectPolicy"
// CLRPName is the full name of Local Redirect Policy
CLRPName = CLRPPluralName + "." + CustomResourceDefinitionGroup
// Cilium External Workload (CEW)
// CEWPluralName is the plural name of Cilium External Workload
CEWPluralName = "ciliumexternalworkloads"
// CEWKindDefinition is the kind name for Cilium External Workload
CEWKindDefinition = "CiliumExternalWorkload"
// CEWName is the full name of Cilium External Workload
CEWName = CEWPluralName + "." + CustomResourceDefinitionGroup
// Cilium Clusterwide Envoy Config (CCEC)
// CCECPluralName is the plural name of Cilium Clusterwide Envoy Config
CCECPluralName = "ciliumclusterwideenvoyconfigs"
// CCECKindDefinition is the kind name of Cilium Clusterwide Envoy Config
CCECKindDefinition = "CiliumClusterwideEnvoyConfig"
// CCECName is the full name of Cilium Clusterwide Envoy Config
CCECName = CCECPluralName + "." + CustomResourceDefinitionGroup
// Cilium Envoy Config (CEC)
// CECPluralName is the plural name of Cilium Envoy Config
CECPluralName = "ciliumenvoyconfigs"
// CECKindDefinition is the kind name of Cilium Envoy Config
CECKindDefinition = "CiliumEnvoyConfig"
// CECName is the full name of Cilium Envoy Config
CECName = CECPluralName + "." + CustomResourceDefinitionGroup
// CiliumNodeConfig (CNC)
// CNCPluralName is the plural name of Cilium Node Config
CNCPluralName = "ciliumnodeconfigs"
// CNCKindDefinition is the kind name of Cilium Node Config
CNCKindDefinition = "CiliumNodeConfig"
// CNCName is the full name of Cilium Node Config
CNCName = CNCPluralName + "." + CustomResourceDefinitionGroup
)
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{
Group: CustomResourceDefinitionGroup,
Version: CustomResourceDefinitionVersion,
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
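// Illustrative sketch (not part of the original source): Resource is handy
// when constructing "not found" style errors for CRDs in this group, e.g.
//
//	k8serrors.NewNotFound(Resource(CNPPluralName), "my-policy")
//
// where k8serrors refers to k8s.io/apimachinery/pkg/api/errors and
// "my-policy" is a made-up object name.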
var (
// SchemeBuilder is needed by DeepCopy generator.
SchemeBuilder runtime.SchemeBuilder
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
localSchemeBuilder = &SchemeBuilder
// AddToScheme adds all types of this clientset into the given scheme.
// This allows composition of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addKnownTypes)
}
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&CiliumNetworkPolicy{},
&CiliumNetworkPolicyList{},
&CiliumClusterwideNetworkPolicy{},
&CiliumClusterwideNetworkPolicyList{},
&CiliumEgressGatewayPolicy{},
&CiliumEgressGatewayPolicyList{},
&CiliumEndpoint{},
&CiliumEndpointList{},
&CiliumNode{},
&CiliumNodeList{},
&CiliumNodeConfig{},
&CiliumNodeConfigList{},
&CiliumExternalWorkload{},
&CiliumExternalWorkloadList{},
&CiliumIdentity{},
&CiliumIdentityList{},
&CiliumLocalRedirectPolicy{},
&CiliumLocalRedirectPolicyList{},
&CiliumEnvoyConfig{},
&CiliumEnvoyConfigList{},
&CiliumClusterwideEnvoyConfig{},
&CiliumClusterwideEnvoyConfigList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
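// Illustrative sketch (not part of the original source): a consumer that wants
// to decode Cilium v2 objects with a generic client registers this group into
// its own scheme via AddToScheme.
func exampleRegisterScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}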
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package v2
import (
"net"
"sort"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/cilium/cilium/api/v1/models"
alibabaCloudTypes "github.com/cilium/cilium/pkg/alibabacloud/eni/types"
eniTypes "github.com/cilium/cilium/pkg/aws/eni/types"
azureTypes "github.com/cilium/cilium/pkg/azure/types"
ipamTypes "github.com/cilium/cilium/pkg/ipam/types"
"github.com/cilium/cilium/pkg/node/addressing"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=false
// +kubebuilder:resource:categories={cilium},singular="ciliumendpoint",path="ciliumendpoints",scope="Namespaced",shortName={cep,ciliumep}
// +kubebuilder:printcolumn:JSONPath=".status.identity.id",description="Security Identity",name="Security Identity",type=integer
// +kubebuilder:printcolumn:JSONPath=".status.policy.ingress.state",description="Ingress enforcement in the endpoint",name="Ingress Enforcement",type=string,priority=1
// +kubebuilder:printcolumn:JSONPath=".status.policy.egress.state",description="Egress enforcement in the endpoint",name="Egress Enforcement",type=string,priority=1
// +kubebuilder:printcolumn:JSONPath=".status.state",description="Endpoint current state",name="Endpoint State",type=string
// +kubebuilder:printcolumn:JSONPath=".status.networking.addressing[0].ipv4",description="Endpoint IPv4 address",name="IPv4",type=string
// +kubebuilder:printcolumn:JSONPath=".status.networking.addressing[0].ipv6",description="Endpoint IPv6 address",name="IPv6",type=string
// +kubebuilder:storageversion
// CiliumEndpoint contains the status of a Cilium endpoint.
type CiliumEndpoint struct {
// +deepequal-gen=false
metav1.TypeMeta `json:",inline"`
// +deepequal-gen=false
metav1.ObjectMeta `json:"metadata"`
// +kubebuilder:validation:Optional
Status EndpointStatus `json:"status"`
}
// EndpointPolicyState defines the state of the Policy mode: "enforcing", "non-enforcing", "disabled"
type EndpointPolicyState string
// EndpointStatus is the status of a Cilium endpoint.
type EndpointStatus struct {
// ID is the cilium-agent-local ID of the endpoint.
ID int64 `json:"id,omitempty"`
// Controllers is the list of failing controllers for this endpoint.
Controllers ControllerList `json:"controllers,omitempty"`
// ExternalIdentifiers is a set of identifiers to identify the endpoint
// apart from the pod name. This includes container runtime IDs.
ExternalIdentifiers *models.EndpointIdentifiers `json:"external-identifiers,omitempty"`
// Health is the overall endpoint & subcomponent health.
Health *models.EndpointHealth `json:"health,omitempty"`
// Identity is the security identity associated with the endpoint
Identity *EndpointIdentity `json:"identity,omitempty"`
// Log is the list of the last few warning and error log entries
Log []*models.EndpointStatusChange `json:"log,omitempty"`
// Networking is the networking properties of the endpoint.
//
// +kubebuilder:validation:Optional
Networking *EndpointNetworking `json:"networking,omitempty"`
// Encryption is the encryption configuration of the node
//
// +kubebuilder:validation:Optional
Encryption EncryptionSpec `json:"encryption,omitempty"`
Policy *EndpointPolicy `json:"policy,omitempty"`
// State is the state of the endpoint.
//
// +kubebuilder:validation:Enum=creating;waiting-for-identity;not-ready;waiting-to-regenerate;regenerating;restoring;ready;disconnecting;disconnected;invalid
State string `json:"state,omitempty"`
NamedPorts models.NamedPorts `json:"named-ports,omitempty"`
}
// +k8s:deepcopy-gen=false
// ControllerList is a list of ControllerStatus.
type ControllerList []ControllerStatus
// Sort sorts the ControllerList by controller name
func (c ControllerList) Sort() {
sort.Slice(c, func(i, j int) bool { return c[i].Name < c[j].Name })
}
// ControllerStatus is the status of a failing controller.
type ControllerStatus struct {
// Name is the name of the controller
Name string `json:"name,omitempty"`
// Configuration is the controller configuration
Configuration *models.ControllerStatusConfiguration `json:"configuration,omitempty"`
// Status is the status of the controller
Status ControllerStatusStatus `json:"status,omitempty"`
// UUID is the UUID of the controller
UUID string `json:"uuid,omitempty"`
}
// +k8s:deepcopy-gen=false
// ControllerStatusStatus is the detailed status section of a controller.
type ControllerStatusStatus struct {
ConsecutiveFailureCount int64 `json:"consecutive-failure-count,omitempty"`
FailureCount int64 `json:"failure-count,omitempty"`
LastFailureMsg string `json:"last-failure-msg,omitempty"`
LastFailureTimestamp string `json:"last-failure-timestamp,omitempty"`
LastSuccessTimestamp string `json:"last-success-timestamp,omitempty"`
SuccessCount int64 `json:"success-count,omitempty"`
}
// EndpointPolicy represents the endpoint's policy by listing all allowed
// ingress and egress identities in combination with L4 port and protocol.
type EndpointPolicy struct {
Ingress *EndpointPolicyDirection `json:"ingress,omitempty"`
Egress *EndpointPolicyDirection `json:"egress,omitempty"`
}
// EndpointPolicyDirection is the list of allowed identities per direction.
type EndpointPolicyDirection struct {
Enforcing bool `json:"enforcing"`
Allowed AllowedIdentityList `json:"allowed,omitempty"`
Denied DenyIdentityList `json:"denied,omitempty"`
// Deprecated
Removing AllowedIdentityList `json:"removing,omitempty"`
// Deprecated
Adding AllowedIdentityList `json:"adding,omitempty"`
State EndpointPolicyState `json:"state,omitempty"`
}
// IdentityTuple specifies a peer by identity, destination port and protocol.
type IdentityTuple struct {
Identity uint64 `json:"identity,omitempty"`
IdentityLabels map[string]string `json:"identity-labels,omitempty"`
DestPort uint16 `json:"dest-port,omitempty"`
Protocol uint8 `json:"protocol,omitempty"`
}
// +k8s:deepcopy-gen=false
// IdentityList is a list of IdentityTuple.
type IdentityList []IdentityTuple
// Sort sorts an IdentityList by numeric identity, port and protocol.
func (a IdentityList) Sort() {
sort.Slice(a, func(i, j int) bool {
if a[i].Identity < a[j].Identity {
return true
} else if a[i].Identity == a[j].Identity {
if a[i].DestPort < a[j].DestPort {
return true
} else if a[i].DestPort == a[j].DestPort {
return a[i].Protocol < a[j].Protocol
}
}
return false
})
}
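// Illustrative sketch (not part of the original source): Sort orders entries
// first by identity, then destination port, then protocol, which gives
// endpoint status lists a stable, deterministic ordering.
func exampleSortIdentityList() IdentityList {
	l := IdentityList{
		{Identity: 20, DestPort: 80, Protocol: 6},
		{Identity: 10, DestPort: 53, Protocol: 17},
		{Identity: 10, DestPort: 53, Protocol: 6},
	}
	l.Sort() // result: (10,53,6), (10,53,17), (20,80,6)
	return l
}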
// +k8s:deepcopy-gen=false
// AllowedIdentityList is a list of IdentityTuples that specify peers that are
// allowed.
type AllowedIdentityList IdentityList
// Sort sorts an AllowedIdentityList by numeric identity, port and protocol.
func (a AllowedIdentityList) Sort() {
IdentityList(a).Sort()
}
// +k8s:deepcopy-gen=false
// DenyIdentityList is a list of IdentityTuples that specify peers that are
// denied.
type DenyIdentityList IdentityList
// Sort sorts a DenyIdentityList by numeric identity, port and protocol.
func (d DenyIdentityList) Sort() {
IdentityList(d).Sort()
}
// EndpointIdentity is the identity information of an endpoint.
type EndpointIdentity struct {
// ID is the numeric identity of the endpoint
ID int64 `json:"id,omitempty"`
// Labels is the list of labels associated with the identity
Labels []string `json:"labels,omitempty"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:categories={cilium},singular="ciliumidentity",path="ciliumidentities",scope="Cluster",shortName={ciliumid}
// +kubebuilder:printcolumn:JSONPath=".metadata.labels.io\\.kubernetes\\.pod\\.namespace",description="The namespace of the entity",name="Namespace",type=string
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",description="The age of the identity",name="Age",type=date
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// CiliumIdentity is a CRD that represents an identity managed by Cilium.
// It is intended as a backing store for identity allocation, acting as the
// global coordination backend, and can be used in place of a KVStore (such as
// etcd).
// The name of the CRD is the numeric identity and the labels on the CRD object
// are the Kubernetes-sourced labels seen by cilium. This is currently the
// only label source possible when running under Kubernetes. Non-Kubernetes
// labels are filtered, but all labels, from all sources, are placed in the
// SecurityLabels field. These also include the source and are used to define
// the identity.
// The labels under metav1.ObjectMeta can be used when searching for
// CiliumIdentity instances that include particular labels. This can be done
// with invocations such as:
//
// kubectl get ciliumid -l 'foo=bar'
type CiliumIdentity struct {
// +deepequal-gen=false
metav1.TypeMeta `json:",inline"`
// +deepequal-gen=false
metav1.ObjectMeta `json:"metadata"`
// SecurityLabels is the source-of-truth set of labels for this identity.
SecurityLabels map[string]string `json:"security-labels"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +deepequal-gen=false
// CiliumIdentityList is a list of CiliumIdentity objects.
type CiliumIdentityList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
// Items is a list of CiliumIdentity
Items []CiliumIdentity `json:"items"`
}
// +k8s:deepcopy-gen=false
// AddressPair is a pair of IPv4 and/or IPv6 addresses.
type AddressPair struct {
IPV4 string `json:"ipv4,omitempty"`
IPV6 string `json:"ipv6,omitempty"`
}
// +k8s:deepcopy-gen=false
// AddressPairList is a list of address pairs.
type AddressPairList []*AddressPair
// Sort sorts an AddressPairList by IPv4 and IPv6 address.
func (a AddressPairList) Sort() {
sort.Slice(a, func(i, j int) bool {
if a[i].IPV4 < a[j].IPV4 {
return true
} else if a[i].IPV4 == a[j].IPV4 {
return a[i].IPV6 < a[j].IPV6
}
return false
})
}
// EndpointNetworking is the addressing information of an endpoint.
type EndpointNetworking struct {
// IPv4/6 addresses assigned to this Endpoint
Addressing AddressPairList `json:"addressing"`
// NodeIP is the IP of the node the endpoint is running on. The IP must
// be reachable between nodes.
NodeIP string `json:"node,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=false
// +deepequal-gen=false
// CiliumEndpointList is a list of CiliumEndpoint objects.
type CiliumEndpointList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
// Items is a list of CiliumEndpoint
Items []CiliumEndpoint `json:"items"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:categories={cilium},singular="ciliumnode",path="ciliumnodes",scope="Cluster",shortName={cn,ciliumn}
// +kubebuilder:printcolumn:JSONPath=".spec.addresses[?(@.type==\"CiliumInternalIP\")].ip",description="Cilium internal IP for this node",name="CiliumInternalIP",type=string
// +kubebuilder:printcolumn:JSONPath=".spec.addresses[?(@.type==\"InternalIP\")].ip",description="IP of the node",name="InternalIP",type=string
// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",description="Time duration since creation of CiliumNode",name="Age",type=date
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// CiliumNode represents a node managed by Cilium. It contains a specification
// to control various node specific configuration aspects and a status section
// to represent the status of the node.
type CiliumNode struct {
// +deepequal-gen=false
metav1.TypeMeta `json:",inline"`
// +deepequal-gen=false
metav1.ObjectMeta `json:"metadata"`
// Spec defines the desired specification/configuration of the node.
Spec NodeSpec `json:"spec"`
// Status defines the realized specification/configuration and status
// of the node.
//
// +kubebuilder:validation:Optional
Status NodeStatus `json:"status,omitempty"`
}
// NodeAddress is a node address.
type NodeAddress struct {
// Type is the type of the node address
Type addressing.AddressType `json:"type,omitempty"`
// IP is an IP of a node
IP string `json:"ip,omitempty"`
}
// NodeSpec is the configuration specific to a node.
type NodeSpec struct {
// InstanceID is the identifier of the node. This is different from the
// node name which is typically the FQDN of the node. The InstanceID
// typically refers to the identifier used by the cloud provider or
// some other means of identification.
InstanceID string `json:"instance-id,omitempty"`
// BootID is a unique node identifier generated on boot
//
// +kubebuilder:validation:Optional
BootID string `json:"bootid,omitempty"`
// Addresses is the list of all node addresses.
//
// +kubebuilder:validation:Optional
Addresses []NodeAddress `json:"addresses,omitempty"`
// HealthAddressing is the addressing information for health connectivity
// checking.
//
// +kubebuilder:validation:Optional
HealthAddressing HealthAddressingSpec `json:"health,omitempty"`
// IngressAddressing is the addressing information for Ingress listener.
//
// +kubebuilder:validation:Optional
IngressAddressing AddressPair `json:"ingress,omitempty"`
// Encryption is the encryption configuration of the node.
//
// +kubebuilder:validation:Optional
Encryption EncryptionSpec `json:"encryption,omitempty"`
// ENI is the AWS ENI specific configuration.
//
// +kubebuilder:validation:Optional
ENI eniTypes.ENISpec `json:"eni,omitempty"`
// Azure is the Azure IPAM specific configuration.
//
// +kubebuilder:validation:Optional
Azure azureTypes.AzureSpec `json:"azure,omitempty"`
// AlibabaCloud is the AlibabaCloud IPAM specific configuration.
//
// +kubebuilder:validation:Optional
AlibabaCloud alibabaCloudTypes.Spec `json:"alibaba-cloud,omitempty"`
// IPAM is the address management specification. This section can be
// populated by a user or it can be automatically populated by an IPAM
// operator.
//
// +kubebuilder:validation:Optional
IPAM ipamTypes.IPAMSpec `json:"ipam,omitempty"`
// NodeIdentity is the Cilium numeric identity allocated for the node, if any.
//
// +kubebuilder:validation:Optional
NodeIdentity uint64 `json:"nodeidentity,omitempty"`
}
// HealthAddressingSpec is the addressing information required to do
// connectivity health checking.
type HealthAddressingSpec struct {
// IPv4 is the IPv4 address of the IPv4 health endpoint.
//
// +kubebuilder:validation:Optional
IPv4 string `json:"ipv4,omitempty"`
// IPv6 is the IPv6 address of the IPv6 health endpoint.
//
// +kubebuilder:validation:Optional
IPv6 string `json:"ipv6,omitempty"`
}
// EncryptionSpec defines the encryption relevant configuration of a node.
type EncryptionSpec struct {
// Key is the index to the key to use for encryption or 0 if encryption is
// disabled.
//
// +kubebuilder:validation:Optional
Key int `json:"key,omitempty"`
}
// NodeStatus is the status of a node.
type NodeStatus struct {
// ENI is the AWS ENI specific status of the node.
//
// +kubebuilder:validation:Optional
ENI eniTypes.ENIStatus `json:"eni,omitempty"`
// Azure is the Azure specific status of the node.
//
// +kubebuilder:validation:Optional
Azure azureTypes.AzureStatus `json:"azure,omitempty"`
// IPAM is the IPAM status of the node.
//
// +kubebuilder:validation:Optional
IPAM ipamTypes.IPAMStatus `json:"ipam,omitempty"`
// AlibabaCloud is the AlibabaCloud specific status of the node.
//
// +kubebuilder:validation:Optional
AlibabaCloud alibabaCloudTypes.ENIStatus `json:"alibaba-cloud,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +deepequal-gen=false
// CiliumNodeList is a list of CiliumNode objects.
type CiliumNodeList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
// Items is a list of CiliumNode
Items []CiliumNode `json:"items"`
}
// InstanceID returns the InstanceID of a CiliumNode.
func (n *CiliumNode) InstanceID() (instanceID string) {
if n != nil {
instanceID = n.Spec.InstanceID
// OBSOLETE: This fallback can be removed in Cilium 1.9
if instanceID == "" {
instanceID = n.Spec.ENI.InstanceID
}
}
return
}
func (n NodeAddress) ToString() string {
return n.IP
}
func (n NodeAddress) AddrType() addressing.AddressType {
return n.Type
}
// GetIP returns one of the CiliumNode's IP addresses available with the
// following priority:
// - NodeInternalIP
// - NodeExternalIP
// - other IP address type
// nil is returned if GetIP fails to extract an IP from the CiliumNode
// based on the provided address family.
func (n *CiliumNode) GetIP(ipv6 bool) net.IP {
return addressing.ExtractNodeIP[NodeAddress](n.Spec.Addresses, ipv6)
}
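// Illustrative sketch (not part of the original source): GetIP selects the
// node IP for the requested address family, so callers typically ask for
// both families separately.
func exampleNodeIPs(n *CiliumNode) (v4, v6 net.IP) {
	return n.GetIP(false), n.GetIP(true)
}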
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package v2
import (
models "github.com/cilium/cilium/api/v1/models"
v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
api "github.com/cilium/cilium/pkg/policy/api"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumClusterwideEnvoyConfig) DeepCopyInto(out *CiliumClusterwideEnvoyConfig) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumClusterwideEnvoyConfig.
func (in *CiliumClusterwideEnvoyConfig) DeepCopy() *CiliumClusterwideEnvoyConfig {
if in == nil {
return nil
}
out := new(CiliumClusterwideEnvoyConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumClusterwideEnvoyConfig) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumClusterwideEnvoyConfigList) DeepCopyInto(out *CiliumClusterwideEnvoyConfigList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumClusterwideEnvoyConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumClusterwideEnvoyConfigList.
func (in *CiliumClusterwideEnvoyConfigList) DeepCopy() *CiliumClusterwideEnvoyConfigList {
if in == nil {
return nil
}
out := new(CiliumClusterwideEnvoyConfigList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumClusterwideEnvoyConfigList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumClusterwideNetworkPolicy) DeepCopyInto(out *CiliumClusterwideNetworkPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Spec != nil {
in, out := &in.Spec, &out.Spec
*out = new(api.Rule)
(*in).DeepCopyInto(*out)
}
if in.Specs != nil {
in, out := &in.Specs, &out.Specs
*out = make(api.Rules, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(api.Rule)
(*in).DeepCopyInto(*out)
}
}
}
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumClusterwideNetworkPolicy.
func (in *CiliumClusterwideNetworkPolicy) DeepCopy() *CiliumClusterwideNetworkPolicy {
if in == nil {
return nil
}
out := new(CiliumClusterwideNetworkPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumClusterwideNetworkPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumClusterwideNetworkPolicyList) DeepCopyInto(out *CiliumClusterwideNetworkPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumClusterwideNetworkPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumClusterwideNetworkPolicyList.
func (in *CiliumClusterwideNetworkPolicyList) DeepCopy() *CiliumClusterwideNetworkPolicyList {
if in == nil {
return nil
}
out := new(CiliumClusterwideNetworkPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumClusterwideNetworkPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEgressGatewayPolicy) DeepCopyInto(out *CiliumEgressGatewayPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEgressGatewayPolicy.
func (in *CiliumEgressGatewayPolicy) DeepCopy() *CiliumEgressGatewayPolicy {
if in == nil {
return nil
}
out := new(CiliumEgressGatewayPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumEgressGatewayPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEgressGatewayPolicyList) DeepCopyInto(out *CiliumEgressGatewayPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumEgressGatewayPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEgressGatewayPolicyList.
func (in *CiliumEgressGatewayPolicyList) DeepCopy() *CiliumEgressGatewayPolicyList {
if in == nil {
return nil
}
out := new(CiliumEgressGatewayPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumEgressGatewayPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEgressGatewayPolicySpec) DeepCopyInto(out *CiliumEgressGatewayPolicySpec) {
*out = *in
if in.Selectors != nil {
in, out := &in.Selectors, &out.Selectors
*out = make([]EgressRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.DestinationCIDRs != nil {
in, out := &in.DestinationCIDRs, &out.DestinationCIDRs
*out = make([]IPv4CIDR, len(*in))
copy(*out, *in)
}
if in.ExcludedCIDRs != nil {
in, out := &in.ExcludedCIDRs, &out.ExcludedCIDRs
*out = make([]IPv4CIDR, len(*in))
copy(*out, *in)
}
if in.EgressGateway != nil {
in, out := &in.EgressGateway, &out.EgressGateway
*out = new(EgressGateway)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEgressGatewayPolicySpec.
func (in *CiliumEgressGatewayPolicySpec) DeepCopy() *CiliumEgressGatewayPolicySpec {
if in == nil {
return nil
}
out := new(CiliumEgressGatewayPolicySpec)
in.DeepCopyInto(out)
return out
}
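// Example (illustrative, not generated): a minimal sketch of using DeepCopyInto to
// populate an existing CiliumEgressGatewayPolicySpec value. The generated code above
// allocates fresh DestinationCIDRs/ExcludedCIDRs slices for the destination, so edits
// to the copy's slices never reach the source. The function name is hypothetical.
func exampleCopyEgressSpec(src *CiliumEgressGatewayPolicySpec) CiliumEgressGatewayPolicySpec {
	var dst CiliumEgressGatewayPolicySpec
	src.DeepCopyInto(&dst) // src must be non-nil; dst now holds independent slices
	// Copy-local edit: appending here does not grow or alias src's slices.
	dst.ExcludedCIDRs = append(dst.ExcludedCIDRs, dst.DestinationCIDRs...)
	return dst
}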
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEndpoint) DeepCopyInto(out *CiliumEndpoint) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEndpoint.
func (in *CiliumEndpoint) DeepCopy() *CiliumEndpoint {
if in == nil {
return nil
}
out := new(CiliumEndpoint)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumEndpoint) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEndpointList) DeepCopyInto(out *CiliumEndpointList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumEndpoint, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEndpointList.
func (in *CiliumEndpointList) DeepCopy() *CiliumEndpointList {
if in == nil {
return nil
}
out := new(CiliumEndpointList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumEndpointList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEnvoyConfig) DeepCopyInto(out *CiliumEnvoyConfig) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEnvoyConfig.
func (in *CiliumEnvoyConfig) DeepCopy() *CiliumEnvoyConfig {
if in == nil {
return nil
}
out := new(CiliumEnvoyConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumEnvoyConfig) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEnvoyConfigList) DeepCopyInto(out *CiliumEnvoyConfigList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumEnvoyConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEnvoyConfigList.
func (in *CiliumEnvoyConfigList) DeepCopy() *CiliumEnvoyConfigList {
if in == nil {
return nil
}
out := new(CiliumEnvoyConfigList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumEnvoyConfigList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumEnvoyConfigSpec) DeepCopyInto(out *CiliumEnvoyConfigSpec) {
*out = *in
if in.Services != nil {
in, out := &in.Services, &out.Services
*out = make([]*ServiceListener, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(ServiceListener)
(*in).DeepCopyInto(*out)
}
}
}
if in.BackendServices != nil {
in, out := &in.BackendServices, &out.BackendServices
*out = make([]*Service, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(Service)
(*in).DeepCopyInto(*out)
}
}
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]XDSResource, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEnvoyConfigSpec.
func (in *CiliumEnvoyConfigSpec) DeepCopy() *CiliumEnvoyConfigSpec {
if in == nil {
return nil
}
out := new(CiliumEnvoyConfigSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumExternalWorkload) DeepCopyInto(out *CiliumExternalWorkload) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumExternalWorkload.
func (in *CiliumExternalWorkload) DeepCopy() *CiliumExternalWorkload {
if in == nil {
return nil
}
out := new(CiliumExternalWorkload)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumExternalWorkload) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumExternalWorkloadList) DeepCopyInto(out *CiliumExternalWorkloadList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumExternalWorkload, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumExternalWorkloadList.
func (in *CiliumExternalWorkloadList) DeepCopy() *CiliumExternalWorkloadList {
if in == nil {
return nil
}
out := new(CiliumExternalWorkloadList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumExternalWorkloadList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumExternalWorkloadSpec) DeepCopyInto(out *CiliumExternalWorkloadSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumExternalWorkloadSpec.
func (in *CiliumExternalWorkloadSpec) DeepCopy() *CiliumExternalWorkloadSpec {
if in == nil {
return nil
}
out := new(CiliumExternalWorkloadSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumExternalWorkloadStatus) DeepCopyInto(out *CiliumExternalWorkloadStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumExternalWorkloadStatus.
func (in *CiliumExternalWorkloadStatus) DeepCopy() *CiliumExternalWorkloadStatus {
if in == nil {
return nil
}
out := new(CiliumExternalWorkloadStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumIdentity) DeepCopyInto(out *CiliumIdentity) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.SecurityLabels != nil {
in, out := &in.SecurityLabels, &out.SecurityLabels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumIdentity.
func (in *CiliumIdentity) DeepCopy() *CiliumIdentity {
if in == nil {
return nil
}
out := new(CiliumIdentity)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumIdentity) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
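// Example (illustrative, not generated): DeepCopyInto above allocates a new
// SecurityLabels map rather than sharing the original's, so writes to the copy's
// map never alias back into the source identity. A rough sketch; the function
// name is hypothetical and in must be non-nil.
func exampleCopyIdentityLabels(in *CiliumIdentity) map[string]string {
	var out CiliumIdentity
	in.DeepCopyInto(&out) // out.SecurityLabels is a fresh map when in's is non-nil
	if out.SecurityLabels == nil {
		out.SecurityLabels = map[string]string{}
	}
	out.SecurityLabels["example"] = "copy-only" // in.SecurityLabels is untouched
	return out.SecurityLabels
}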
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumIdentityList) DeepCopyInto(out *CiliumIdentityList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumIdentity, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumIdentityList.
func (in *CiliumIdentityList) DeepCopy() *CiliumIdentityList {
if in == nil {
return nil
}
out := new(CiliumIdentityList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumIdentityList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLocalRedirectPolicy) DeepCopyInto(out *CiliumLocalRedirectPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLocalRedirectPolicy.
func (in *CiliumLocalRedirectPolicy) DeepCopy() *CiliumLocalRedirectPolicy {
if in == nil {
return nil
}
out := new(CiliumLocalRedirectPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumLocalRedirectPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLocalRedirectPolicyList) DeepCopyInto(out *CiliumLocalRedirectPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumLocalRedirectPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLocalRedirectPolicyList.
func (in *CiliumLocalRedirectPolicyList) DeepCopy() *CiliumLocalRedirectPolicyList {
if in == nil {
return nil
}
out := new(CiliumLocalRedirectPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumLocalRedirectPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLocalRedirectPolicySpec) DeepCopyInto(out *CiliumLocalRedirectPolicySpec) {
*out = *in
in.RedirectFrontend.DeepCopyInto(&out.RedirectFrontend)
in.RedirectBackend.DeepCopyInto(&out.RedirectBackend)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLocalRedirectPolicySpec.
func (in *CiliumLocalRedirectPolicySpec) DeepCopy() *CiliumLocalRedirectPolicySpec {
if in == nil {
return nil
}
out := new(CiliumLocalRedirectPolicySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumLocalRedirectPolicyStatus) DeepCopyInto(out *CiliumLocalRedirectPolicyStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLocalRedirectPolicyStatus.
func (in *CiliumLocalRedirectPolicyStatus) DeepCopy() *CiliumLocalRedirectPolicyStatus {
if in == nil {
return nil
}
out := new(CiliumLocalRedirectPolicyStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNetworkPolicy) DeepCopyInto(out *CiliumNetworkPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Spec != nil {
in, out := &in.Spec, &out.Spec
*out = new(api.Rule)
(*in).DeepCopyInto(*out)
}
if in.Specs != nil {
in, out := &in.Specs, &out.Specs
*out = make(api.Rules, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(api.Rule)
(*in).DeepCopyInto(*out)
}
}
}
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNetworkPolicy.
func (in *CiliumNetworkPolicy) DeepCopy() *CiliumNetworkPolicy {
if in == nil {
return nil
}
out := new(CiliumNetworkPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumNetworkPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
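// Example (illustrative, not generated): a minimal sketch of the copy-before-mutate
// pattern these helpers enable. DeepCopy returns a fully independent
// CiliumNetworkPolicy (and nil for a nil receiver), so clearing the copy's Specs
// leaves the original, e.g. a cache-shared object, unchanged. The function name
// is hypothetical.
func exampleCopyBeforeMutate(cached *CiliumNetworkPolicy) *CiliumNetworkPolicy {
	cnp := cached.DeepCopy() // nil-safe: returns nil when cached is nil
	if cnp == nil {
		return nil
	}
	cnp.Specs = nil // only the copy is modified
	return cnp
}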
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNetworkPolicyList) DeepCopyInto(out *CiliumNetworkPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumNetworkPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNetworkPolicyList.
func (in *CiliumNetworkPolicyList) DeepCopy() *CiliumNetworkPolicyList {
if in == nil {
return nil
}
out := new(CiliumNetworkPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumNetworkPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNetworkPolicyNodeStatus) DeepCopyInto(out *CiliumNetworkPolicyNodeStatus) {
*out = *in
in.LastUpdated.DeepCopyInto(&out.LastUpdated)
if in.Annotations != nil {
in, out := &in.Annotations, &out.Annotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNetworkPolicyNodeStatus.
func (in *CiliumNetworkPolicyNodeStatus) DeepCopy() *CiliumNetworkPolicyNodeStatus {
if in == nil {
return nil
}
out := new(CiliumNetworkPolicyNodeStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNetworkPolicyStatus) DeepCopyInto(out *CiliumNetworkPolicyStatus) {
*out = *in
if in.DerivativePolicies != nil {
in, out := &in.DerivativePolicies, &out.DerivativePolicies
*out = make(map[string]CiliumNetworkPolicyNodeStatus, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]NetworkPolicyCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNetworkPolicyStatus.
func (in *CiliumNetworkPolicyStatus) DeepCopy() *CiliumNetworkPolicyStatus {
if in == nil {
return nil
}
out := new(CiliumNetworkPolicyStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNode) DeepCopyInto(out *CiliumNode) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNode.
func (in *CiliumNode) DeepCopy() *CiliumNode {
if in == nil {
return nil
}
out := new(CiliumNode)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumNode) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
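// Example (illustrative, not generated): DeepCopyObject is what lets generic
// Kubernetes machinery clone values it only knows as runtime.Object. A rough
// sketch, assuming obj is a non-nil object such as a *CiliumNode or
// *CiliumEndpoint; the function name is hypothetical.
func exampleCloneAsRuntimeObject(obj runtime.Object) runtime.Object {
	// Dispatches to the concrete DeepCopyObject implementation above,
	// so no type switch is needed at the call site.
	return obj.DeepCopyObject()
}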
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNodeConfig) DeepCopyInto(out *CiliumNodeConfig) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeConfig.
func (in *CiliumNodeConfig) DeepCopy() *CiliumNodeConfig {
if in == nil {
return nil
}
out := new(CiliumNodeConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumNodeConfig) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNodeConfigList) DeepCopyInto(out *CiliumNodeConfigList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumNodeConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeConfigList.
func (in *CiliumNodeConfigList) DeepCopy() *CiliumNodeConfigList {
if in == nil {
return nil
}
out := new(CiliumNodeConfigList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumNodeConfigList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNodeConfigSpec) DeepCopyInto(out *CiliumNodeConfigSpec) {
*out = *in
if in.Defaults != nil {
in, out := &in.Defaults, &out.Defaults
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeConfigSpec.
func (in *CiliumNodeConfigSpec) DeepCopy() *CiliumNodeConfigSpec {
if in == nil {
return nil
}
out := new(CiliumNodeConfigSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumNodeList) DeepCopyInto(out *CiliumNodeList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CiliumNode, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeList.
func (in *CiliumNodeList) DeepCopy() *CiliumNodeList {
if in == nil {
return nil
}
out := new(CiliumNodeList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CiliumNodeList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerStatus) DeepCopyInto(out *ControllerStatus) {
*out = *in
if in.Configuration != nil {
in, out := &in.Configuration, &out.Configuration
*out = new(models.ControllerStatusConfiguration)
**out = **in
}
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerStatus.
func (in *ControllerStatus) DeepCopy() *ControllerStatus {
if in == nil {
return nil
}
out := new(ControllerStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EgressGateway) DeepCopyInto(out *EgressGateway) {
*out = *in
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressGateway.
func (in *EgressGateway) DeepCopy() *EgressGateway {
if in == nil {
return nil
}
out := new(EgressGateway)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EgressRule) DeepCopyInto(out *EgressRule) {
*out = *in
if in.NamespaceSelector != nil {
in, out := &in.NamespaceSelector, &out.NamespaceSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.PodSelector != nil {
in, out := &in.PodSelector, &out.PodSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRule.
func (in *EgressRule) DeepCopy() *EgressRule {
if in == nil {
return nil
}
out := new(EgressRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EncryptionSpec) DeepCopyInto(out *EncryptionSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionSpec.
func (in *EncryptionSpec) DeepCopy() *EncryptionSpec {
if in == nil {
return nil
}
out := new(EncryptionSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointIdentity) DeepCopyInto(out *EndpointIdentity) {
*out = *in
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointIdentity.
func (in *EndpointIdentity) DeepCopy() *EndpointIdentity {
if in == nil {
return nil
}
out := new(EndpointIdentity)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointNetworking) DeepCopyInto(out *EndpointNetworking) {
*out = *in
if in.Addressing != nil {
in, out := &in.Addressing, &out.Addressing
*out = make(AddressPairList, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(AddressPair)
**out = **in
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointNetworking.
func (in *EndpointNetworking) DeepCopy() *EndpointNetworking {
if in == nil {
return nil
}
out := new(EndpointNetworking)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointPolicy) DeepCopyInto(out *EndpointPolicy) {
*out = *in
if in.Ingress != nil {
in, out := &in.Ingress, &out.Ingress
*out = new(EndpointPolicyDirection)
(*in).DeepCopyInto(*out)
}
if in.Egress != nil {
in, out := &in.Egress, &out.Egress
*out = new(EndpointPolicyDirection)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPolicy.
func (in *EndpointPolicy) DeepCopy() *EndpointPolicy {
if in == nil {
return nil
}
out := new(EndpointPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointPolicyDirection) DeepCopyInto(out *EndpointPolicyDirection) {
*out = *in
if in.Allowed != nil {
in, out := &in.Allowed, &out.Allowed
*out = make(AllowedIdentityList, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Denied != nil {
in, out := &in.Denied, &out.Denied
*out = make(DenyIdentityList, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Removing != nil {
in, out := &in.Removing, &out.Removing
*out = make(AllowedIdentityList, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Adding != nil {
in, out := &in.Adding, &out.Adding
*out = make(AllowedIdentityList, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPolicyDirection.
func (in *EndpointPolicyDirection) DeepCopy() *EndpointPolicyDirection {
if in == nil {
return nil
}
out := new(EndpointPolicyDirection)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EndpointStatus) DeepCopyInto(out *EndpointStatus) {
*out = *in
if in.Controllers != nil {
in, out := &in.Controllers, &out.Controllers
*out = make(ControllerList, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ExternalIdentifiers != nil {
in, out := &in.ExternalIdentifiers, &out.ExternalIdentifiers
*out = new(models.EndpointIdentifiers)
**out = **in
}
if in.Health != nil {
in, out := &in.Health, &out.Health
*out = new(models.EndpointHealth)
**out = **in
}
if in.Identity != nil {
in, out := &in.Identity, &out.Identity
*out = new(EndpointIdentity)
(*in).DeepCopyInto(*out)
}
if in.Log != nil {
in, out := &in.Log, &out.Log
*out = make([]*models.EndpointStatusChange, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(models.EndpointStatusChange)
**out = **in
}
}
}
if in.Networking != nil {
in, out := &in.Networking, &out.Networking
*out = new(EndpointNetworking)
(*in).DeepCopyInto(*out)
}
out.Encryption = in.Encryption
if in.Policy != nil {
in, out := &in.Policy, &out.Policy
*out = new(EndpointPolicy)
(*in).DeepCopyInto(*out)
}
if in.NamedPorts != nil {
in, out := &in.NamedPorts, &out.NamedPorts
*out = make(models.NamedPorts, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(models.Port)
**out = **in
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointStatus.
func (in *EndpointStatus) DeepCopy() *EndpointStatus {
if in == nil {
return nil
}
out := new(EndpointStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Frontend) DeepCopyInto(out *Frontend) {
*out = *in
if in.ToPorts != nil {
in, out := &in.ToPorts, &out.ToPorts
*out = make([]PortInfo, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Frontend.
func (in *Frontend) DeepCopy() *Frontend {
if in == nil {
return nil
}
out := new(Frontend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HealthAddressingSpec) DeepCopyInto(out *HealthAddressingSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthAddressingSpec.
func (in *HealthAddressingSpec) DeepCopy() *HealthAddressingSpec {
if in == nil {
return nil
}
out := new(HealthAddressingSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IdentityTuple) DeepCopyInto(out *IdentityTuple) {
*out = *in
if in.IdentityLabels != nil {
in, out := &in.IdentityLabels, &out.IdentityLabels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityTuple.
func (in *IdentityTuple) DeepCopy() *IdentityTuple {
if in == nil {
return nil
}
out := new(IdentityTuple)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicyCondition) DeepCopyInto(out *NetworkPolicyCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyCondition.
func (in *NetworkPolicyCondition) DeepCopy() *NetworkPolicyCondition {
if in == nil {
return nil
}
out := new(NetworkPolicyCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeAddress) DeepCopyInto(out *NodeAddress) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress.
func (in *NodeAddress) DeepCopy() *NodeAddress {
if in == nil {
return nil
}
out := new(NodeAddress)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSpec) DeepCopyInto(out *NodeSpec) {
*out = *in
if in.Addresses != nil {
in, out := &in.Addresses, &out.Addresses
*out = make([]NodeAddress, len(*in))
copy(*out, *in)
}
out.HealthAddressing = in.HealthAddressing
out.IngressAddressing = in.IngressAddressing
out.Encryption = in.Encryption
in.ENI.DeepCopyInto(&out.ENI)
out.Azure = in.Azure
in.AlibabaCloud.DeepCopyInto(&out.AlibabaCloud)
in.IPAM.DeepCopyInto(&out.IPAM)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec.
func (in *NodeSpec) DeepCopy() *NodeSpec {
if in == nil {
return nil
}
out := new(NodeSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
*out = *in
in.ENI.DeepCopyInto(&out.ENI)
in.Azure.DeepCopyInto(&out.Azure)
in.IPAM.DeepCopyInto(&out.IPAM)
in.AlibabaCloud.DeepCopyInto(&out.AlibabaCloud)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
func (in *NodeStatus) DeepCopy() *NodeStatus {
if in == nil {
return nil
}
out := new(NodeStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortInfo) DeepCopyInto(out *PortInfo) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortInfo.
func (in *PortInfo) DeepCopy() *PortInfo {
if in == nil {
return nil
}
out := new(PortInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedirectBackend) DeepCopyInto(out *RedirectBackend) {
*out = *in
in.LocalEndpointSelector.DeepCopyInto(&out.LocalEndpointSelector)
if in.ToPorts != nil {
in, out := &in.ToPorts, &out.ToPorts
*out = make([]PortInfo, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectBackend.
func (in *RedirectBackend) DeepCopy() *RedirectBackend {
if in == nil {
return nil
}
out := new(RedirectBackend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RedirectFrontend) DeepCopyInto(out *RedirectFrontend) {
*out = *in
if in.AddressMatcher != nil {
in, out := &in.AddressMatcher, &out.AddressMatcher
*out = new(Frontend)
(*in).DeepCopyInto(*out)
}
if in.ServiceMatcher != nil {
in, out := &in.ServiceMatcher, &out.ServiceMatcher
*out = new(ServiceInfo)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectFrontend.
func (in *RedirectFrontend) DeepCopy() *RedirectFrontend {
if in == nil {
return nil
}
out := new(RedirectFrontend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Service) DeepCopyInto(out *Service) {
*out = *in
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service.
func (in *Service) DeepCopy() *Service {
if in == nil {
return nil
}
out := new(Service)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceInfo) DeepCopyInto(out *ServiceInfo) {
*out = *in
if in.ToPorts != nil {
in, out := &in.ToPorts, &out.ToPorts
*out = make([]PortInfo, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceInfo.
func (in *ServiceInfo) DeepCopy() *ServiceInfo {
if in == nil {
return nil
}
out := new(ServiceInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceListener) DeepCopyInto(out *ServiceListener) {
*out = *in
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]uint16, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceListener.
func (in *ServiceListener) DeepCopy() *ServiceListener {
if in == nil {
return nil
}
out := new(ServiceListener)
in.DeepCopyInto(out)
return out
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XDSResource.
func (in *XDSResource) DeepCopy() *XDSResource {
if in == nil {
return nil
}
out := new(XDSResource)
in.DeepCopyInto(out)
return out
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package v2
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *AddressPair) DeepEqual(other *AddressPair) bool {
if other == nil {
return false
}
if in.IPV4 != other.IPV4 {
return false
}
if in.IPV6 != other.IPV6 {
return false
}
return true
}
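// Example (illustrative, not generated): a minimal sketch of using the generated
// DeepEqual to detect changes between two address pairs. Note that DeepEqual
// requires a non-nil receiver and reports false for a nil argument. The function
// name is hypothetical.
func exampleAddressPairChanged(oldAP, newAP *AddressPair) bool {
	if oldAP == nil {
		return newAP != nil // treat nil -> non-nil as a change
	}
	return !oldAP.DeepEqual(newAP) // true when IPV4 or IPV6 differ, or newAP is nil
}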
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *AddressPairList) DeepEqual(other *AddressPairList) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual((*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *AllowedIdentityList) DeepEqual(other *AllowedIdentityList) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumClusterwideEnvoyConfig) DeepEqual(other *CiliumClusterwideEnvoyConfig) bool {
if other == nil {
return false
}
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
return true
}
// deepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumClusterwideNetworkPolicy) deepEqual(other *CiliumClusterwideNetworkPolicy) bool {
if other == nil {
return false
}
if (in.Spec == nil) != (other.Spec == nil) {
return false
} else if in.Spec != nil {
if !in.Spec.DeepEqual(other.Spec) {
return false
}
}
if ((in.Specs != nil) && (other.Specs != nil)) || ((in.Specs == nil) != (other.Specs == nil)) {
in, other := &in.Specs, &other.Specs
if other == nil || !in.DeepEqual(other) {
return false
}
}
if !in.Status.DeepEqual(&other.Status) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumEgressGatewayPolicy) DeepEqual(other *CiliumEgressGatewayPolicy) bool {
if other == nil {
return false
}
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumEgressGatewayPolicySpec) DeepEqual(other *CiliumEgressGatewayPolicySpec) bool {
if other == nil {
return false
}
if ((in.Selectors != nil) && (other.Selectors != nil)) || ((in.Selectors == nil) != (other.Selectors == nil)) {
in, other := &in.Selectors, &other.Selectors
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
if ((in.DestinationCIDRs != nil) && (other.DestinationCIDRs != nil)) || ((in.DestinationCIDRs == nil) != (other.DestinationCIDRs == nil)) {
in, other := &in.DestinationCIDRs, &other.DestinationCIDRs
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
if ((in.ExcludedCIDRs != nil) && (other.ExcludedCIDRs != nil)) || ((in.ExcludedCIDRs == nil) != (other.ExcludedCIDRs == nil)) {
in, other := &in.ExcludedCIDRs, &other.ExcludedCIDRs
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
if (in.EgressGateway == nil) != (other.EgressGateway == nil) {
return false
} else if in.EgressGateway != nil {
if !in.EgressGateway.DeepEqual(other.EgressGateway) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumEndpoint) DeepEqual(other *CiliumEndpoint) bool {
if other == nil {
return false
}
if !in.Status.DeepEqual(&other.Status) {
return false
}
return true
}
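// Example (illustrative, not generated): a sketch of the usual update-handler guard
// built on DeepEqual. CiliumEndpoint.DeepEqual above compares only Status, so this
// skips updates that do not change the endpoint's observed state. Names are
// hypothetical; handle is any caller-supplied callback.
func exampleOnEndpointUpdate(oldEP, newEP *CiliumEndpoint, handle func(*CiliumEndpoint)) {
	if oldEP != nil && newEP != nil && oldEP.DeepEqual(newEP) {
		return // semantically identical (e.g. periodic resync); nothing to do
	}
	handle(newEP)
}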
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumEnvoyConfig) DeepEqual(other *CiliumEnvoyConfig) bool {
if other == nil {
return false
}
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumEnvoyConfigSpec) DeepEqual(other *CiliumEnvoyConfigSpec) bool {
if other == nil {
return false
}
if ((in.Services != nil) && (other.Services != nil)) || ((in.Services == nil) != (other.Services == nil)) {
in, other := &in.Services, &other.Services
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual((*other)[i]) {
return false
}
}
}
}
if ((in.BackendServices != nil) && (other.BackendServices != nil)) || ((in.BackendServices == nil) != (other.BackendServices == nil)) {
in, other := &in.BackendServices, &other.BackendServices
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual((*other)[i]) {
return false
}
}
}
}
if ((in.Resources != nil) && (other.Resources != nil)) || ((in.Resources == nil) != (other.Resources == nil)) {
in, other := &in.Resources, &other.Resources
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
if (in.NodeSelector == nil) != (other.NodeSelector == nil) {
return false
} else if in.NodeSelector != nil {
if !in.NodeSelector.DeepEqual(other.NodeSelector) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumExternalWorkload) DeepEqual(other *CiliumExternalWorkload) bool {
if other == nil {
return false
}
if in.Spec != other.Spec {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumExternalWorkloadSpec) DeepEqual(other *CiliumExternalWorkloadSpec) bool {
if other == nil {
return false
}
if in.IPv4AllocCIDR != other.IPv4AllocCIDR {
return false
}
if in.IPv6AllocCIDR != other.IPv6AllocCIDR {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumExternalWorkloadStatus) DeepEqual(other *CiliumExternalWorkloadStatus) bool {
if other == nil {
return false
}
if in.ID != other.ID {
return false
}
if in.IP != other.IP {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumIdentity) DeepEqual(other *CiliumIdentity) bool {
if other == nil {
return false
}
if ((in.SecurityLabels != nil) && (other.SecurityLabels != nil)) || ((in.SecurityLabels == nil) != (other.SecurityLabels == nil)) {
in, other := &in.SecurityLabels, &other.SecurityLabels
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
if inValue != otherValue {
return false
}
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumLocalRedirectPolicy) DeepEqual(other *CiliumLocalRedirectPolicy) bool {
if other == nil {
return false
}
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumLocalRedirectPolicySpec) DeepEqual(other *CiliumLocalRedirectPolicySpec) bool {
if other == nil {
return false
}
if !in.RedirectFrontend.DeepEqual(&other.RedirectFrontend) {
return false
}
if !in.RedirectBackend.DeepEqual(&other.RedirectBackend) {
return false
}
if in.SkipRedirectFromBackend != other.SkipRedirectFromBackend {
return false
}
if in.Description != other.Description {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumLocalRedirectPolicyStatus) DeepEqual(other *CiliumLocalRedirectPolicyStatus) bool {
if other == nil {
return false
}
if in.OK != other.OK {
return false
}
return true
}
// deepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumNetworkPolicy) deepEqual(other *CiliumNetworkPolicy) bool {
if other == nil {
return false
}
if (in.Spec == nil) != (other.Spec == nil) {
return false
} else if in.Spec != nil {
if !in.Spec.DeepEqual(other.Spec) {
return false
}
}
if ((in.Specs != nil) && (other.Specs != nil)) || ((in.Specs == nil) != (other.Specs == nil)) {
in, other := &in.Specs, &other.Specs
if other == nil || !in.DeepEqual(other) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumNetworkPolicyNodeStatus) DeepEqual(other *CiliumNetworkPolicyNodeStatus) bool {
if other == nil {
return false
}
if in.OK != other.OK {
return false
}
if in.Error != other.Error {
return false
}
if !in.LastUpdated.DeepEqual(&other.LastUpdated) {
return false
}
if in.Revision != other.Revision {
return false
}
if in.Enforcing != other.Enforcing {
return false
}
if ((in.Annotations != nil) && (other.Annotations != nil)) || ((in.Annotations == nil) != (other.Annotations == nil)) {
in, other := &in.Annotations, &other.Annotations
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
if inValue != otherValue {
return false
}
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumNetworkPolicyStatus) DeepEqual(other *CiliumNetworkPolicyStatus) bool {
if other == nil {
return false
}
if ((in.DerivativePolicies != nil) && (other.DerivativePolicies != nil)) || ((in.DerivativePolicies == nil) != (other.DerivativePolicies == nil)) {
in, other := &in.DerivativePolicies, &other.DerivativePolicies
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
if !inValue.DeepEqual(&otherValue) {
return false
}
}
}
}
}
if ((in.Conditions != nil) && (other.Conditions != nil)) || ((in.Conditions == nil) != (other.Conditions == nil)) {
in, other := &in.Conditions, &other.Conditions
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *CiliumNode) DeepEqual(other *CiliumNode) bool {
if other == nil {
return false
}
if !in.Spec.DeepEqual(&other.Spec) {
return false
}
if !in.Status.DeepEqual(&other.Status) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ControllerList) DeepEqual(other *ControllerList) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ControllerStatus) DeepEqual(other *ControllerStatus) bool {
if other == nil {
return false
}
if in.Name != other.Name {
return false
}
if (in.Configuration == nil) != (other.Configuration == nil) {
return false
} else if in.Configuration != nil {
if !in.Configuration.DeepEqual(other.Configuration) {
return false
}
}
if in.Status != other.Status {
return false
}
if in.UUID != other.UUID {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ControllerStatusStatus) DeepEqual(other *ControllerStatusStatus) bool {
if other == nil {
return false
}
if in.ConsecutiveFailureCount != other.ConsecutiveFailureCount {
return false
}
if in.FailureCount != other.FailureCount {
return false
}
if in.LastFailureMsg != other.LastFailureMsg {
return false
}
if in.LastFailureTimestamp != other.LastFailureTimestamp {
return false
}
if in.LastSuccessTimestamp != other.LastSuccessTimestamp {
return false
}
if in.SuccessCount != other.SuccessCount {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *DenyIdentityList) DeepEqual(other *DenyIdentityList) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EgressGateway) DeepEqual(other *EgressGateway) bool {
if other == nil {
return false
}
if (in.NodeSelector == nil) != (other.NodeSelector == nil) {
return false
} else if in.NodeSelector != nil {
if !in.NodeSelector.DeepEqual(other.NodeSelector) {
return false
}
}
if in.Interface != other.Interface {
return false
}
if in.EgressIP != other.EgressIP {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EgressRule) DeepEqual(other *EgressRule) bool {
if other == nil {
return false
}
if (in.NamespaceSelector == nil) != (other.NamespaceSelector == nil) {
return false
} else if in.NamespaceSelector != nil {
if !in.NamespaceSelector.DeepEqual(other.NamespaceSelector) {
return false
}
}
if (in.PodSelector == nil) != (other.PodSelector == nil) {
return false
} else if in.PodSelector != nil {
if !in.PodSelector.DeepEqual(other.PodSelector) {
return false
}
}
if (in.NodeSelector == nil) != (other.NodeSelector == nil) {
return false
} else if in.NodeSelector != nil {
if !in.NodeSelector.DeepEqual(other.NodeSelector) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EncryptionSpec) DeepEqual(other *EncryptionSpec) bool {
if other == nil {
return false
}
if in.Key != other.Key {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointIdentity) DeepEqual(other *EndpointIdentity) bool {
if other == nil {
return false
}
if in.ID != other.ID {
return false
}
if ((in.Labels != nil) && (other.Labels != nil)) || ((in.Labels == nil) != (other.Labels == nil)) {
in, other := &in.Labels, &other.Labels
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointNetworking) DeepEqual(other *EndpointNetworking) bool {
if other == nil {
return false
}
if ((in.Addressing != nil) && (other.Addressing != nil)) || ((in.Addressing == nil) != (other.Addressing == nil)) {
in, other := &in.Addressing, &other.Addressing
if other == nil || !in.DeepEqual(other) {
return false
}
}
if in.NodeIP != other.NodeIP {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointPolicy) DeepEqual(other *EndpointPolicy) bool {
if other == nil {
return false
}
if (in.Ingress == nil) != (other.Ingress == nil) {
return false
} else if in.Ingress != nil {
if !in.Ingress.DeepEqual(other.Ingress) {
return false
}
}
if (in.Egress == nil) != (other.Egress == nil) {
return false
} else if in.Egress != nil {
if !in.Egress.DeepEqual(other.Egress) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointPolicyDirection) DeepEqual(other *EndpointPolicyDirection) bool {
if other == nil {
return false
}
if in.Enforcing != other.Enforcing {
return false
}
if ((in.Allowed != nil) && (other.Allowed != nil)) || ((in.Allowed == nil) != (other.Allowed == nil)) {
in, other := &in.Allowed, &other.Allowed
if other == nil || !in.DeepEqual(other) {
return false
}
}
if ((in.Denied != nil) && (other.Denied != nil)) || ((in.Denied == nil) != (other.Denied == nil)) {
in, other := &in.Denied, &other.Denied
if other == nil || !in.DeepEqual(other) {
return false
}
}
if ((in.Removing != nil) && (other.Removing != nil)) || ((in.Removing == nil) != (other.Removing == nil)) {
in, other := &in.Removing, &other.Removing
if other == nil || !in.DeepEqual(other) {
return false
}
}
if ((in.Adding != nil) && (other.Adding != nil)) || ((in.Adding == nil) != (other.Adding == nil)) {
in, other := &in.Adding, &other.Adding
if other == nil || !in.DeepEqual(other) {
return false
}
}
if in.State != other.State {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *EndpointStatus) DeepEqual(other *EndpointStatus) bool {
if other == nil {
return false
}
if in.ID != other.ID {
return false
}
if ((in.Controllers != nil) && (other.Controllers != nil)) || ((in.Controllers == nil) != (other.Controllers == nil)) {
in, other := &in.Controllers, &other.Controllers
if other == nil || !in.DeepEqual(other) {
return false
}
}
if (in.ExternalIdentifiers == nil) != (other.ExternalIdentifiers == nil) {
return false
} else if in.ExternalIdentifiers != nil {
if !in.ExternalIdentifiers.DeepEqual(other.ExternalIdentifiers) {
return false
}
}
if (in.Health == nil) != (other.Health == nil) {
return false
} else if in.Health != nil {
if !in.Health.DeepEqual(other.Health) {
return false
}
}
if (in.Identity == nil) != (other.Identity == nil) {
return false
} else if in.Identity != nil {
if !in.Identity.DeepEqual(other.Identity) {
return false
}
}
if ((in.Log != nil) && (other.Log != nil)) || ((in.Log == nil) != (other.Log == nil)) {
in, other := &in.Log, &other.Log
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual((*other)[i]) {
return false
}
}
}
}
if (in.Networking == nil) != (other.Networking == nil) {
return false
} else if in.Networking != nil {
if !in.Networking.DeepEqual(other.Networking) {
return false
}
}
if in.Encryption != other.Encryption {
return false
}
if (in.Policy == nil) != (other.Policy == nil) {
return false
} else if in.Policy != nil {
if !in.Policy.DeepEqual(other.Policy) {
return false
}
}
if in.State != other.State {
return false
}
if ((in.NamedPorts != nil) && (other.NamedPorts != nil)) || ((in.NamedPorts == nil) != (other.NamedPorts == nil)) {
in, other := &in.NamedPorts, &other.NamedPorts
if other == nil || !in.DeepEqual(other) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Frontend) DeepEqual(other *Frontend) bool {
if other == nil {
return false
}
if in.IP != other.IP {
return false
}
if ((in.ToPorts != nil) && (other.ToPorts != nil)) || ((in.ToPorts == nil) != (other.ToPorts == nil)) {
in, other := &in.ToPorts, &other.ToPorts
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *HealthAddressingSpec) DeepEqual(other *HealthAddressingSpec) bool {
if other == nil {
return false
}
if in.IPv4 != other.IPv4 {
return false
}
if in.IPv6 != other.IPv6 {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *IdentityList) DeepEqual(other *IdentityList) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *IdentityTuple) DeepEqual(other *IdentityTuple) bool {
if other == nil {
return false
}
if in.Identity != other.Identity {
return false
}
if ((in.IdentityLabels != nil) && (other.IdentityLabels != nil)) || ((in.IdentityLabels == nil) != (other.IdentityLabels == nil)) {
in, other := &in.IdentityLabels, &other.IdentityLabels
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
if inValue != otherValue {
return false
}
}
}
}
}
if in.DestPort != other.DestPort {
return false
}
if in.Protocol != other.Protocol {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *NetworkPolicyCondition) DeepEqual(other *NetworkPolicyCondition) bool {
if other == nil {
return false
}
if in.Type != other.Type {
return false
}
if in.Status != other.Status {
return false
}
if !in.LastTransitionTime.DeepEqual(&other.LastTransitionTime) {
return false
}
if in.Reason != other.Reason {
return false
}
if in.Message != other.Message {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *NodeAddress) DeepEqual(other *NodeAddress) bool {
if other == nil {
return false
}
if in.Type != other.Type {
return false
}
if in.IP != other.IP {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *NodeSpec) DeepEqual(other *NodeSpec) bool {
if other == nil {
return false
}
if in.InstanceID != other.InstanceID {
return false
}
if in.BootID != other.BootID {
return false
}
if ((in.Addresses != nil) && (other.Addresses != nil)) || ((in.Addresses == nil) != (other.Addresses == nil)) {
in, other := &in.Addresses, &other.Addresses
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
if in.HealthAddressing != other.HealthAddressing {
return false
}
if in.IngressAddressing != other.IngressAddressing {
return false
}
if in.Encryption != other.Encryption {
return false
}
if !in.ENI.DeepEqual(&other.ENI) {
return false
}
if in.Azure != other.Azure {
return false
}
if !in.AlibabaCloud.DeepEqual(&other.AlibabaCloud) {
return false
}
if !in.IPAM.DeepEqual(&other.IPAM) {
return false
}
if in.NodeIdentity != other.NodeIdentity {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *NodeStatus) DeepEqual(other *NodeStatus) bool {
if other == nil {
return false
}
if !in.ENI.DeepEqual(&other.ENI) {
return false
}
if !in.Azure.DeepEqual(&other.Azure) {
return false
}
if !in.IPAM.DeepEqual(&other.IPAM) {
return false
}
if !in.AlibabaCloud.DeepEqual(&other.AlibabaCloud) {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *PortInfo) DeepEqual(other *PortInfo) bool {
if other == nil {
return false
}
if in.Port != other.Port {
return false
}
if in.Protocol != other.Protocol {
return false
}
if in.Name != other.Name {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *RedirectBackend) DeepEqual(other *RedirectBackend) bool {
if other == nil {
return false
}
if !in.LocalEndpointSelector.DeepEqual(&other.LocalEndpointSelector) {
return false
}
if ((in.ToPorts != nil) && (other.ToPorts != nil)) || ((in.ToPorts == nil) != (other.ToPorts == nil)) {
in, other := &in.ToPorts, &other.ToPorts
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *RedirectFrontend) DeepEqual(other *RedirectFrontend) bool {
if other == nil {
return false
}
if (in.AddressMatcher == nil) != (other.AddressMatcher == nil) {
return false
} else if in.AddressMatcher != nil {
if !in.AddressMatcher.DeepEqual(other.AddressMatcher) {
return false
}
}
if (in.ServiceMatcher == nil) != (other.ServiceMatcher == nil) {
return false
} else if in.ServiceMatcher != nil {
if !in.ServiceMatcher.DeepEqual(other.ServiceMatcher) {
return false
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Service) DeepEqual(other *Service) bool {
if other == nil {
return false
}
if in.Name != other.Name {
return false
}
if in.Namespace != other.Namespace {
return false
}
if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
in, other := &in.Ports, &other.Ports
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ServiceInfo) DeepEqual(other *ServiceInfo) bool {
if other == nil {
return false
}
if in.Name != other.Name {
return false
}
if in.Namespace != other.Namespace {
return false
}
if ((in.ToPorts != nil) && (other.ToPorts != nil)) || ((in.ToPorts == nil) != (other.ToPorts == nil)) {
in, other := &in.ToPorts, &other.ToPorts
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ServiceListener) DeepEqual(other *ServiceListener) bool {
if other == nil {
return false
}
if in.Name != other.Name {
return false
}
if in.Namespace != other.Namespace {
return false
}
if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
in, other := &in.Ports, &other.Ports
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
if in.Listener != other.Listener {
return false
}
return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2014 The Kubernetes Authors.
package labels
import (
"fmt"
"sort"
"strings"
"k8s.io/apimachinery/pkg/util/validation/field"
)
// Labels allows you to present labels independently from their storage.
type Labels interface {
// Has returns whether the provided label exists.
Has(label string) (exists bool)
// Get returns the value for the provided label.
Get(label string) (value string)
}
// Set is a map of label:value. It implements Labels.
type Set map[string]string
// String returns all labels listed as a human readable string.
// Conveniently, exactly the format that ParseSelector takes.
func (ls Set) String() string {
selector := make([]string, 0, len(ls))
for key, value := range ls {
selector = append(selector, key+"="+value)
}
// Sort for determinism.
sort.StringSlice(selector).Sort()
return strings.Join(selector, ",")
}
// Has returns whether the provided label exists in the map.
func (ls Set) Has(label string) bool {
_, exists := ls[label]
return exists
}
// Get returns the value in the map for the provided label.
func (ls Set) Get(label string) string {
return ls[label]
}
// AsSelector converts labels into a selector. It does not
// perform any validation, which means the server will reject
// the request if the Set contains invalid values.
func (ls Set) AsSelector() Selector {
return SelectorFromSet(ls)
}
// AsValidatedSelector converts labels into a selector.
// The Set is validated client-side, which allows catching errors early.
func (ls Set) AsValidatedSelector() (Selector, error) {
return ValidatedSelectorFromSet(ls)
}
// AsSelectorPreValidated converts labels into a selector, but
// assumes that labels are already validated and thus doesn't
// perform any validation.
// According to our measurements this is significantly faster
// in codepaths that matter at high scale.
// Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector
// instead, which does not copy.
func (ls Set) AsSelectorPreValidated() Selector {
return SelectorFromValidatedSet(ls)
}
// FormatLabels converts label map into plain string
func FormatLabels(labelMap map[string]string) string {
l := Set(labelMap).String()
if l == "" {
l = "<none>"
}
return l
}
// Conflicts takes two maps and returns true if any key present in both
// maps has a different value in each, and returns false otherwise.
func Conflicts(labels1, labels2 Set) bool {
small := labels1
big := labels2
if len(labels2) < len(labels1) {
small = labels2
big = labels1
}
for k, v := range small {
if val, match := big[k]; match {
if val != v {
return true
}
}
}
return false
}
// Merge combines the given maps and does not check for any conflicts
// between them. In case of a conflict, the second map (labels2) wins.
func Merge(labels1, labels2 Set) Set {
mergedMap := Set{}
for k, v := range labels1 {
mergedMap[k] = v
}
for k, v := range labels2 {
mergedMap[k] = v
}
return mergedMap
}
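// Illustrative sketch (not part of the original vendored code): how Conflicts
// and Merge behave on two overlapping Sets. The keys and values below are
// arbitrary examples.
func exampleConflictsAndMerge() (bool, Set) {
	a := Set{"env": "prod", "team": "net"}
	b := Set{"env": "dev", "region": "eu"}
	// "env" exists in both maps with different values, so Conflicts reports true.
	conflict := Conflicts(a, b)
	// Merge keeps all keys; for "env" the second map wins, yielding "dev".
	merged := Merge(a, b)
	return conflict, merged
}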
// Equals returns true if the given maps are equal
func Equals(labels1, labels2 Set) bool {
if len(labels1) != len(labels2) {
return false
}
for k, v := range labels1 {
value, ok := labels2[k]
if !ok {
return false
}
if value != v {
return false
}
}
return true
}
// ConvertSelectorToLabelsMap converts selector string to labels map
// and validates keys and values
func ConvertSelectorToLabelsMap(selector string, opts ...field.PathOption) (Set, error) {
labelsMap := Set{}
if len(selector) == 0 {
return labelsMap, nil
}
labels := strings.Split(selector, ",")
for _, label := range labels {
l := strings.Split(label, "=")
if len(l) != 2 {
return labelsMap, fmt.Errorf("invalid selector: %s", l)
}
key := strings.TrimSpace(l[0])
if err := validateLabelKey(key, field.ToPath(opts...)); err != nil {
return labelsMap, err
}
value := strings.TrimSpace(l[1])
if err := validateLabelValue(key, value, field.ToPath(opts...)); err != nil {
return labelsMap, err
}
labelsMap[key] = value
}
return labelsMap, nil
}
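// Illustrative sketch (not part of the original vendored code): what
// ConvertSelectorToLabelsMap accepts and rejects. It only understands simple
// "key=value" pairs separated by commas; set-based expressions fail to parse.
func exampleConvertSelectorToLabelsMap() {
	m, err := ConvertSelectorToLabelsMap("app=guestbook,tier=frontend")
	_, _ = m, err // m == Set{"app": "guestbook", "tier": "frontend"}, err == nil

	_, err = ConvertSelectorToLabelsMap("app in (a,b)")
	_ = err // non-nil: "app in (a" is not a "key=value" pair
}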
// Copyright 2022 ADA Logics Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package labels
func FuzzLabelsParse(data []byte) int {
_, _ = Parse(string(data))
return 1
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Copyright 2014 The Kubernetes Authors.
package labels
import (
"fmt"
"slices"
"sort"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/klog/v2"
"github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/selection"
)
var (
unaryOperators = []string{
string(selection.Exists), string(selection.DoesNotExist),
}
binaryOperators = []string{
string(selection.In), string(selection.NotIn),
string(selection.Equals), string(selection.DoubleEquals), string(selection.NotEquals),
string(selection.GreaterThan), string(selection.LessThan),
}
validRequirementOperators = append(binaryOperators, unaryOperators...)
)
// Requirements is AND of all requirements.
type Requirements []Requirement
// Selector represents a label selector.
type Selector interface {
// Matches returns true if this selector matches the given set of labels.
Matches(Labels) bool
// Empty returns true if this selector does not restrict the selection space.
Empty() bool
// String returns a human readable string that represents this selector.
String() string
// Add adds requirements to the Selector
Add(r ...Requirement) Selector
// Requirements converts this interface into Requirements to expose
// more detailed selection information.
// If there are querying parameters, it will return converted requirements and selectable=true.
// If this selector doesn't want to select anything, it will return selectable=false.
Requirements() (requirements Requirements, selectable bool)
// Make a deep copy of the selector.
DeepCopySelector() Selector
// RequiresExactMatch allows a caller to introspect whether a given selector
// requires a single specific label to be set, and if so returns the value it
// requires.
RequiresExactMatch(label string) (value string, found bool)
}
// Sharing this saves 1 alloc per use; this is safe because it's immutable.
var sharedEverythingSelector Selector = internalSelector{}
// Everything returns a selector that matches all labels.
func Everything() Selector {
return sharedEverythingSelector
}
type nothingSelector struct{}
func (n nothingSelector) Matches(_ Labels) bool { return false }
func (n nothingSelector) Empty() bool { return false }
func (n nothingSelector) String() string { return "" }
func (n nothingSelector) Add(_ ...Requirement) Selector { return n }
func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false }
func (n nothingSelector) DeepCopySelector() Selector { return n }
func (n nothingSelector) RequiresExactMatch(label string) (value string, found bool) {
return "", false
}
// Sharing this saves 1 alloc per use; this is safe because it's immutable.
var sharedNothingSelector Selector = nothingSelector{}
// Nothing returns a selector that matches no labels
func Nothing() Selector {
return sharedNothingSelector
}
// NewSelector returns a nil selector
func NewSelector() Selector {
return internalSelector(nil)
}
type internalSelector []Requirement
func (s internalSelector) DeepCopy() internalSelector {
if s == nil {
return nil
}
result := make([]Requirement, len(s))
for i := range s {
s[i].DeepCopyInto(&result[i])
}
return result
}
func (s internalSelector) DeepCopySelector() Selector {
return s.DeepCopy()
}
// ByKey sorts requirements by key to obtain deterministic parsing.
type ByKey []Requirement
func (a ByKey) Len() int { return len(a) }
func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByKey) Less(i, j int) bool { return a[i].key < a[j].key }
// Requirement contains values, a key, and an operator that relates the key and values.
// The zero value of Requirement is invalid.
// Requirement implements both set based match and exact match
// Requirement should be initialized via NewRequirement constructor for creating a valid Requirement.
// +k8s:deepcopy-gen=true
type Requirement struct {
key string
operator selection.Operator
// In huge majority of cases we have at most one value here.
// It is generally faster to operate on a single-element slice
// than on a single-element map, so we have a slice here.
strValues []string
}
// NewRequirement is the constructor for a Requirement.
// If any of these rules is violated, an error is returned:
// 1. The operator can only be In, NotIn, Equals, DoubleEquals, Gt, Lt, NotEquals, Exists, or DoesNotExist.
// 2. If the operator is In or NotIn, the values set must be non-empty.
// 3. If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value.
// 4. If the operator is Exists or DoesNotExist, the value set must be empty.
// 5. If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer.
// 6. The key is invalid due to its length, or sequence of characters. See validateLabelKey for more details.
//
// The empty string is a valid value in the input values set.
// Returned error, if not nil, is guaranteed to be an aggregated field.ErrorList
func NewRequirement(key string, op selection.Operator, vals []string, opts ...field.PathOption) (*Requirement, error) {
var allErrs field.ErrorList
path := field.ToPath(opts...)
if err := validateLabelKey(key, path.Child("key")); err != nil {
allErrs = append(allErrs, err)
}
valuePath := path.Child("values")
switch op {
case selection.In, selection.NotIn:
if len(vals) == 0 {
allErrs = append(allErrs, field.Invalid(valuePath, vals, "for 'in', 'notin' operators, values set can't be empty"))
}
case selection.Equals, selection.DoubleEquals, selection.NotEquals:
if len(vals) != 1 {
allErrs = append(allErrs, field.Invalid(valuePath, vals, "exact-match compatibility requires one single value"))
}
case selection.Exists, selection.DoesNotExist:
if len(vals) != 0 {
allErrs = append(allErrs, field.Invalid(valuePath, vals, "values set must be empty for exists and does not exist"))
}
case selection.GreaterThan, selection.LessThan:
if len(vals) != 1 {
allErrs = append(allErrs, field.Invalid(valuePath, vals, "for 'Gt', 'Lt' operators, exactly one value is required"))
}
for i := range vals {
if _, err := strconv.ParseInt(vals[i], 10, 64); err != nil {
allErrs = append(allErrs, field.Invalid(valuePath.Index(i), vals[i], "for 'Gt', 'Lt' operators, the value must be an integer"))
}
}
default:
allErrs = append(allErrs, field.NotSupported(path.Child("operator"), op, validRequirementOperators))
}
for i := range vals {
if err := validateLabelValue(key, vals[i], valuePath.Index(i)); err != nil {
allErrs = append(allErrs, err)
}
}
return &Requirement{key: key, operator: op, strValues: vals}, allErrs.ToAggregate()
}
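// Illustrative sketch (not part of the original vendored code): constructing a
// valid and an invalid Requirement following the rules listed above. The keys
// and values are arbitrary examples.
func exampleNewRequirement() {
	// "env in (prod,staging)": valid, since In requires a non-empty value set.
	r, err := NewRequirement("env", selection.In, []string{"prod", "staging"})
	_, _ = r, err // err == nil

	// "replicas > abc": invalid, since Gt/Lt values must parse as integers.
	_, err = NewRequirement("replicas", selection.GreaterThan, []string{"abc"})
	_ = err // non-nil aggregated field.ErrorList
}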
func (r *Requirement) hasValue(value string) bool {
for i := range r.strValues {
if r.strValues[i] == value {
return true
}
}
return false
}
// Matches returns true if the Requirement matches the input Labels.
// There is a match in the following cases:
// 1. The operator is Exists and Labels has the Requirement's key.
// 2. The operator is In, Labels has the Requirement's key and Labels'
// value for that key is in Requirement's value set.
// 3. The operator is NotIn, Labels has the Requirement's key and
// Labels' value for that key is not in Requirement's value set.
// 4. The operator is DoesNotExist or NotIn and Labels does not have the
// Requirement's key.
// 5. The operator is GreaterThanOperator or LessThanOperator, and Labels has
// the Requirement's key and the corresponding value satisfies mathematical inequality.
func (r *Requirement) Matches(ls Labels) bool {
switch r.operator {
case selection.In, selection.Equals, selection.DoubleEquals:
if !ls.Has(r.key) {
return false
}
return r.hasValue(ls.Get(r.key))
case selection.NotIn, selection.NotEquals:
if !ls.Has(r.key) {
return true
}
return !r.hasValue(ls.Get(r.key))
case selection.Exists:
return ls.Has(r.key)
case selection.DoesNotExist:
return !ls.Has(r.key)
case selection.GreaterThan, selection.LessThan:
if !ls.Has(r.key) {
return false
}
lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64)
if err != nil {
klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err)
return false
}
// There should be only one strValue in r.strValues, and can be converted to an integer.
if len(r.strValues) != 1 {
klog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r)
return false
}
var rValue int64
for i := range r.strValues {
rValue, err = strconv.ParseInt(r.strValues[i], 10, 64)
if err != nil {
klog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r)
return false
}
}
return (r.operator == selection.GreaterThan && lsValue > rValue) || (r.operator == selection.LessThan && lsValue < rValue)
default:
return false
}
}
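// Illustrative sketch (not part of the original vendored code): how Matches
// treats a missing key differently per operator, using Set as the Labels
// implementation. The label names are arbitrary examples.
func exampleRequirementMatches() {
	ls := Set{"env": "prod"}
	in, _ := NewRequirement("env", selection.In, []string{"prod", "dev"})
	notIn, _ := NewRequirement("tier", selection.NotIn, []string{"frontend"})
	gt, _ := NewRequirement("replicas", selection.GreaterThan, []string{"3"})

	_ = in.Matches(ls)    // true: key present and its value is in the set
	_ = notIn.Matches(ls) // true: NotIn also matches when the key is absent
	_ = gt.Matches(ls)    // false: Gt requires the key to be present
}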
// Key returns requirement key
func (r *Requirement) Key() string {
return r.key
}
// Operator returns requirement operator
func (r *Requirement) Operator() selection.Operator {
return r.operator
}
// Values returns requirement values
func (r *Requirement) Values() sets.Set[string] {
ret := sets.New[string]()
for i := range r.strValues {
ret.Insert(r.strValues[i])
}
return ret
}
// Equal checks the equality of requirement.
func (r Requirement) Equal(x Requirement) bool {
if r.key != x.key {
return false
}
if r.operator != x.operator {
return false
}
return slices.Equal(r.strValues, x.strValues)
}
// Empty returns true if the internalSelector doesn't restrict selection space
func (s internalSelector) Empty() bool {
if s == nil {
return true
}
return len(s) == 0
}
// String returns a human-readable string that represents this
// Requirement. The output is meaningful only for a valid Requirement;
// see NewRequirement for creating one.
func (r *Requirement) String() string {
var sb strings.Builder
sb.Grow(
// length of r.key
len(r.key) +
// length of 'r.operator' + 2 spaces for the worst case ('in' and 'notin')
len(r.operator) + 2 +
// length of the 'r.strValues' slice, estimated at 5 chars per value
+5*len(r.strValues))
if r.operator == selection.DoesNotExist {
sb.WriteString("!")
}
sb.WriteString(r.key)
switch r.operator {
case selection.Equals:
sb.WriteString("=")
case selection.DoubleEquals:
sb.WriteString("==")
case selection.NotEquals:
sb.WriteString("!=")
case selection.In:
sb.WriteString(" in ")
case selection.NotIn:
sb.WriteString(" notin ")
case selection.GreaterThan:
sb.WriteString(">")
case selection.LessThan:
sb.WriteString("<")
case selection.Exists, selection.DoesNotExist:
return sb.String()
}
switch r.operator {
case selection.In, selection.NotIn:
sb.WriteString("(")
}
if len(r.strValues) == 1 {
sb.WriteString(r.strValues[0])
} else { // only > 1 since == 0 prohibited by NewRequirement
// normalizes value order on output, without mutating the in-memory selector representation
// also avoids normalization when it is not required, and ensures we do not mutate shared data
sb.WriteString(strings.Join(safeSort(r.strValues), ","))
}
switch r.operator {
case selection.In, selection.NotIn:
sb.WriteString(")")
}
return sb.String()
}
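// Illustrative sketch (not part of the original vendored code): String renders
// the requirement in the same syntax Parse accepts, sorting multi-value sets
// on output without mutating the Requirement.
func exampleRequirementString() {
	r, _ := NewRequirement("env", selection.NotIn, []string{"prod", "dev"})
	_ = r.String() // "env notin (dev,prod)"
}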
// safeSort sorts input strings without modification
func safeSort(in []string) []string {
if slices.IsSorted(in) {
return in
}
out := make([]string, len(in))
copy(out, in)
slices.Sort(out)
return out
}
// Add adds requirements to the selector. It copies the current selector returning a new one
func (s internalSelector) Add(reqs ...Requirement) Selector {
ret := make(internalSelector, 0, len(s)+len(reqs))
ret = append(ret, s...)
ret = append(ret, reqs...)
sort.Sort(ByKey(ret))
return ret
}
// Matches for a internalSelector returns true if all
// its Requirements match the input Labels. If any
// Requirement does not match, false is returned.
func (s internalSelector) Matches(l Labels) bool {
for ix := range s {
if matches := s[ix].Matches(l); !matches {
return false
}
}
return true
}
func (s internalSelector) Requirements() (Requirements, bool) { return Requirements(s), true }
// String returns a comma-separated string of all
// the internalSelector Requirements' human-readable strings.
func (s internalSelector) String() string {
var reqs []string
for ix := range s {
reqs = append(reqs, s[ix].String())
}
return strings.Join(reqs, ",")
}
// RequiresExactMatch introspects whether a given selector requires a single specific field
// to be set, and if so returns the value it requires.
func (s internalSelector) RequiresExactMatch(label string) (value string, found bool) {
for ix := range s {
if s[ix].key == label {
switch s[ix].operator {
case selection.Equals, selection.DoubleEquals, selection.In:
if len(s[ix].strValues) == 1 {
return s[ix].strValues[0], true
}
}
return "", false
}
}
return "", false
}
// Token represents constant definition for lexer token
type Token int
const (
// ErrorToken represents scan error
ErrorToken Token = iota
// EndOfStringToken represents end of string
EndOfStringToken
// ClosedParToken represents close parenthesis
ClosedParToken
// CommaToken represents the comma
CommaToken
// DoesNotExistToken represents logic not
DoesNotExistToken
// DoubleEqualsToken represents double equals
DoubleEqualsToken
// EqualsToken represents equal
EqualsToken
// GreaterThanToken represents greater than
GreaterThanToken
// IdentifierToken represents identifier, e.g. keys and values
IdentifierToken
// InToken represents in
InToken
// LessThanToken represents less than
LessThanToken
// NotEqualsToken represents not equal
NotEqualsToken
// NotInToken represents not in
NotInToken
// OpenParToken represents open parenthesis
OpenParToken
)
// string2token contains the mapping between lexer Token and token literal
// (except IdentifierToken, EndOfStringToken and ErrorToken, which have no fixed literal)
var string2token = map[string]Token{
")": ClosedParToken,
",": CommaToken,
"!": DoesNotExistToken,
"==": DoubleEqualsToken,
"=": EqualsToken,
">": GreaterThanToken,
"in": InToken,
"<": LessThanToken,
"!=": NotEqualsToken,
"notin": NotInToken,
"(": OpenParToken,
}
// ScannedItem contains the Token and the literal produced by the lexer.
type ScannedItem struct {
tok Token
literal string
}
// isWhitespace returns true if the byte is a space, tab, carriage return, or newline.
func isWhitespace(ch byte) bool {
return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n'
}
// isSpecialSymbol detects if the character ch can be an operator
func isSpecialSymbol(ch byte) bool {
switch ch {
case '=', '!', '(', ')', ',', '>', '<':
return true
}
return false
}
// Lexer represents the lexer for label selectors.
// It contains the information necessary to tokenize the input string.
type Lexer struct {
// s stores the string to be tokenized
s string
// pos is the position currently tokenized
pos int
}
// read returns the character currently being lexed and increments the
// position, guarding against reading past the end of the buffer.
func (l *Lexer) read() (b byte) {
b = 0
if l.pos < len(l.s) {
b = l.s[l.pos]
l.pos++
}
return b
}
// unread 'undoes' the last read character
func (l *Lexer) unread() {
l.pos--
}
// scanIDOrKeyword scans the string to recognize a literal token (for example 'in') or an identifier.
func (l *Lexer) scanIDOrKeyword() (tok Token, lit string) {
var buffer []byte
IdentifierLoop:
for {
switch ch := l.read(); {
case ch == 0:
break IdentifierLoop
case isSpecialSymbol(ch) || isWhitespace(ch):
l.unread()
break IdentifierLoop
default:
buffer = append(buffer, ch)
}
}
s := string(buffer)
if val, ok := string2token[s]; ok { // is a literal token?
return val, s
}
return IdentifierToken, s // otherwise is an identifier
}
// scanSpecialSymbol scans a string starting with a special symbol. Special
// symbols identify non-literal operators such as "!=", "==" and "=".
func (l *Lexer) scanSpecialSymbol() (Token, string) {
lastScannedItem := ScannedItem{}
var buffer []byte
SpecialSymbolLoop:
for {
switch ch := l.read(); {
case ch == 0:
break SpecialSymbolLoop
case isSpecialSymbol(ch):
buffer = append(buffer, ch)
if token, ok := string2token[string(buffer)]; ok {
lastScannedItem = ScannedItem{tok: token, literal: string(buffer)}
} else if lastScannedItem.tok != 0 {
l.unread()
break SpecialSymbolLoop
}
default:
l.unread()
break SpecialSymbolLoop
}
}
if lastScannedItem.tok == 0 {
return ErrorToken, fmt.Sprintf("error expected: keyword found '%s'", buffer)
}
return lastScannedItem.tok, lastScannedItem.literal
}
// skipWhiteSpaces consumes all blank characters and
// returns the first non-blank character.
func (l *Lexer) skipWhiteSpaces(ch byte) byte {
for {
if !isWhitespace(ch) {
return ch
}
ch = l.read()
}
}
// Lex returns a pair of Token and literal. The literal is
// meaningful only for IdentifierToken.
func (l *Lexer) Lex() (tok Token, lit string) {
switch ch := l.skipWhiteSpaces(l.read()); {
case ch == 0:
return EndOfStringToken, ""
case isSpecialSymbol(ch):
l.unread()
return l.scanSpecialSymbol()
default:
l.unread()
return l.scanIDOrKeyword()
}
}
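// Illustrative sketch (not part of the original vendored code): the token
// stream produced by the lexer for a small selector expression.
func exampleLex() {
	l := &Lexer{s: "env notin (prod)"}
	tok, lit := l.Lex() // IdentifierToken, "env"
	tok, lit = l.Lex()  // NotInToken, "notin"
	tok, lit = l.Lex()  // OpenParToken, "("
	_, _ = tok, lit
}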
// Parser holds the state of the label selector parser.
type Parser struct {
l *Lexer
scannedItems []ScannedItem
position int
}
// ParserContext represents context during parsing:
// some literal for example 'in' and 'notin' can be
// recognized as operator for example 'x in (a)' but
// it can be recognized as value for example 'value in (in)'
type ParserContext int
const (
// KeyAndOperator represents key and operator
KeyAndOperator ParserContext = iota
// Values represents values
Values
)
// lookahead returns the current token and literal without incrementing the position.
func (p *Parser) lookahead(context ParserContext) (Token, string) {
tok, lit := p.scannedItems[p.position].tok, p.scannedItems[p.position].literal
if context == Values {
switch tok {
case InToken, NotInToken:
tok = IdentifierToken
}
}
return tok, lit
}
// consume returns the current token and literal and increments the position.
func (p *Parser) consume(context ParserContext) (Token, string) {
p.position++
tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal
if context == Values {
switch tok {
case InToken, NotInToken:
tok = IdentifierToken
}
}
return tok, lit
}
// scan runs through the input string and stores the ScannedItem in an array
// Parser can now lookahead and consume the tokens
func (p *Parser) scan() {
for {
token, literal := p.l.Lex()
p.scannedItems = append(p.scannedItems, ScannedItem{token, literal})
if token == EndOfStringToken {
break
}
}
}
// parse runs the left recursive descending algorithm
// on input string. It returns a list of Requirement objects.
func (p *Parser) parse() (internalSelector, error) {
p.scan() // init scannedItems
var requirements internalSelector
for {
tok, lit := p.lookahead(Values)
switch tok {
case IdentifierToken, DoesNotExistToken:
r, err := p.parseRequirement()
if err != nil {
return nil, fmt.Errorf("unable to parse requirement: %w", err)
}
requirements = append(requirements, *r)
t, l := p.consume(Values)
switch t {
case EndOfStringToken:
return requirements, nil
case CommaToken:
t2, l2 := p.lookahead(Values)
if t2 != IdentifierToken && t2 != DoesNotExistToken {
return nil, fmt.Errorf("found '%s', expected: identifier after ','", l2)
}
default:
return nil, fmt.Errorf("found '%s', expected: ',' or 'end of string'", l)
}
case EndOfStringToken:
return requirements, nil
default:
return nil, fmt.Errorf("found '%s', expected: !, identifier, or 'end of string'", lit)
}
}
}
func (p *Parser) parseRequirement() (*Requirement, error) {
key, operator, err := p.parseKeyAndInferOperator()
if err != nil {
return nil, err
}
if operator == selection.Exists || operator == selection.DoesNotExist { // operator found lookahead set checked
return NewRequirement(key, operator, []string{})
}
operator, err = p.parseOperator()
if err != nil {
return nil, err
}
var values sets.Set[string]
switch operator {
case selection.In, selection.NotIn:
values, err = p.parseValues()
case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.GreaterThan, selection.LessThan:
values, err = p.parseExactValue()
}
if err != nil {
return nil, err
}
return NewRequirement(key, operator, sets.List(values))
}
// parseKeyAndInferOperator parses literals.
// If no operator ('!', 'in', 'notin', '==', '=', '!=') is found,
// the 'exists' operator is inferred.
func (p *Parser) parseKeyAndInferOperator() (string, selection.Operator, error) {
var operator selection.Operator
tok, literal := p.consume(Values)
if tok == DoesNotExistToken {
operator = selection.DoesNotExist
tok, literal = p.consume(Values)
}
if tok != IdentifierToken {
err := fmt.Errorf("found '%s', expected: identifier", literal)
return "", "", err
}
if err := validateLabelKey(literal, nil); err != nil {
return "", "", err
}
if t, _ := p.lookahead(Values); t == EndOfStringToken || t == CommaToken {
if operator != selection.DoesNotExist {
operator = selection.Exists
}
}
return literal, operator, nil
}
// parseOperator parses and returns the requirement's operator, which may be
// a set-based or an exact-match operator.
func (p *Parser) parseOperator() (op selection.Operator, err error) {
tok, lit := p.consume(KeyAndOperator)
switch tok {
// DoesNotExistToken shouldn't be here because it's a unary operator, not a binary operator
case InToken:
op = selection.In
case EqualsToken:
op = selection.Equals
case DoubleEqualsToken:
op = selection.DoubleEquals
case GreaterThanToken:
op = selection.GreaterThan
case LessThanToken:
op = selection.LessThan
case NotInToken:
op = selection.NotIn
case NotEqualsToken:
op = selection.NotEquals
default:
return "", fmt.Errorf("found '%s', expected: %v", lit, strings.Join(binaryOperators, ", "))
}
return op, nil
}
// parseValues parses the values for set based matching (x,y,z)
func (p *Parser) parseValues() (sets.Set[string], error) {
tok, lit := p.consume(Values)
if tok != OpenParToken {
return nil, fmt.Errorf("found '%s' expected: '('", lit)
}
tok, lit = p.lookahead(Values)
switch tok {
case IdentifierToken, CommaToken:
s, err := p.parseIdentifiersList() // handles general cases
if err != nil {
return s, err
}
if tok, _ = p.consume(Values); tok != ClosedParToken {
return nil, fmt.Errorf("found '%s', expected: ')'", lit)
}
return s, nil
case ClosedParToken: // handles "()"
p.consume(Values)
return sets.New[string](""), nil
default:
return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit)
}
}
// parseIdentifiersList parses a (possibly empty) list of
// comma-separated (possibly empty) identifiers.
func (p *Parser) parseIdentifiersList() (sets.Set[string], error) {
s := sets.New[string]()
for {
tok, lit := p.consume(Values)
switch tok {
case IdentifierToken:
s.Insert(lit)
tok2, lit2 := p.lookahead(Values)
switch tok2 {
case CommaToken:
continue
case ClosedParToken:
return s, nil
default:
return nil, fmt.Errorf("found '%s', expected: ',' or ')'", lit2)
}
case CommaToken: // handled here since we can have "(,"
if s.Len() == 0 {
s.Insert("") // to handle (,
}
tok2, _ := p.lookahead(Values)
if tok2 == ClosedParToken {
s.Insert("") // to handle ,) Double "" removed by StringSet
return s, nil
}
if tok2 == CommaToken {
p.consume(Values)
s.Insert("") // to handle ,, Double "" removed by StringSet
}
default: // it can be operator
return s, fmt.Errorf("found '%s', expected: ',', or identifier", lit)
}
}
}
// parseExactValue parses the only value for exact match style
func (p *Parser) parseExactValue() (sets.Set[string], error) {
s := sets.New[string]()
tok, _ := p.lookahead(Values)
if tok == EndOfStringToken || tok == CommaToken {
s.Insert("")
return s, nil
}
tok, lit := p.consume(Values)
if tok == IdentifierToken {
s.Insert(lit)
return s, nil
}
return nil, fmt.Errorf("found '%s', expected: identifier", lit)
}
// Parse takes a string representing a selector and returns a selector
// object, or an error. This parsing function differs from ParseSelector
// as they parse different selectors with different syntaxes.
// The input will cause an error if it does not follow this form:
//
// <selector-syntax> ::= <requirement> | <requirement> "," <selector-syntax>
// <requirement> ::= [!] KEY [ <set-based-restriction> | <exact-match-restriction> ]
// <set-based-restriction> ::= "" | <inclusion-exclusion> <value-set>
// <inclusion-exclusion> ::= <inclusion> | <exclusion>
// <exclusion> ::= "notin"
// <inclusion> ::= "in"
// <value-set> ::= "(" <values> ")"
// <values> ::= VALUE | VALUE "," <values>
// <exact-match-restriction> ::= ["="|"=="|"!="] VALUE
//
// KEY is a sequence of one or more characters following [ DNS_SUBDOMAIN "/" ] DNS_LABEL. Max length is 63 characters.
// VALUE is a sequence of zero or more characters "([A-Za-z0-9_-\.])". Max length is 63 characters.
// Delimiter is white space: (' ', '\t')
// Example of valid syntax:
//
// "x in (foo,,baz),y,z notin ()"
//
// Note:
// 1. Inclusion - " in " - denotes that the KEY exists and is equal to any of the
// VALUEs in its requirement
// 2. Exclusion - " notin " - denotes that the KEY is not equal to any
// of the VALUEs in its requirement or does not exist
// 3. The empty string is a valid VALUE
// 4. A requirement with just a KEY - as in "y" above - denotes that
// the KEY exists and can be any VALUE.
// 5. A requirement with just !KEY requires that the KEY not exist.
func Parse(selector string, opts ...field.PathOption) (Selector, error) {
parsedSelector, err := parse(selector, field.ToPath(opts...))
if err == nil {
return parsedSelector, nil
}
return nil, err
}
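// Illustrative sketch (not part of the original vendored code): parsing the
// documented example of valid syntax and matching it against a Set.
func exampleParse() {
	sel, err := Parse("x in (foo,,baz),y,z notin ()")
	if err == nil {
		// true: x equals "foo", y exists, and z is absent so "z notin ()" holds.
		_ = sel.Matches(Set{"x": "foo", "y": "anything"})
	}
}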
// parse parses the string representation of the selector and returns the internalSelector struct.
// The callers of this method can then decide how to return the internalSelector struct to their
// callers. This function has two callers now, one returns a Selector interface and the other
// returns a list of requirements.
func parse(selector string, _ *field.Path) (internalSelector, error) {
p := &Parser{l: &Lexer{s: selector, pos: 0}}
items, err := p.parse()
if err != nil {
return nil, err
}
sort.Sort(ByKey(items)) // sort to guarantee deterministic parsing
return internalSelector(items), err
}
func validateLabelKey(k string, path *field.Path) *field.Error {
if errs := validation.IsQualifiedName(k); len(errs) != 0 {
return field.Invalid(path, k, strings.Join(errs, "; "))
}
return nil
}
func validateLabelValue(k, v string, path *field.Path) *field.Error {
if errs := validation.IsValidLabelValue(v); len(errs) != 0 {
return field.Invalid(path.Key(k), v, strings.Join(errs, "; "))
}
return nil
}
// SelectorFromSet returns a Selector which will match exactly the given Set.
// Nil and empty Sets are considered equivalent to Everything().
// It does not perform any validation, which means the server will reject
// the request if the Set contains invalid values.
func SelectorFromSet(ls Set) Selector {
return SelectorFromValidatedSet(ls)
}
// ValidatedSelectorFromSet returns a Selector which will match exactly the given Set.
// Nil and empty Sets are considered equivalent to Everything().
// The Set is validated client-side, which allows catching errors early.
func ValidatedSelectorFromSet(ls Set) (Selector, error) {
if len(ls) == 0 {
return internalSelector{}, nil
}
requirements := make([]Requirement, 0, len(ls))
for label, value := range ls {
r, err := NewRequirement(label, selection.Equals, []string{value})
if err != nil {
return nil, err
}
requirements = append(requirements, *r)
}
// sort to have deterministic string representation
sort.Sort(ByKey(requirements))
return internalSelector(requirements), nil
}
// SelectorFromValidatedSet returns a Selector which will match exactly the given Set.
// Nil and empty Sets are considered equivalent to Everything().
// It assumes that Set is already validated and doesn't do any validation.
// Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector
// instead, which does not copy.
func SelectorFromValidatedSet(ls Set) Selector {
if len(ls) == 0 {
return internalSelector{}
}
requirements := make([]Requirement, 0, len(ls))
for label, value := range ls {
requirements = append(requirements, Requirement{key: label, operator: selection.Equals, strValues: []string{value}})
}
// sort to have deterministic string representation
sort.Sort(ByKey(requirements))
return internalSelector(requirements)
}
// ParseToRequirements takes a string representing a selector and returns a list of
// requirements. This function is suitable for those callers that perform additional
// processing on selector requirements.
// See the documentation for Parse() function for more details.
// TODO: Consider exporting the internalSelector type instead.
func ParseToRequirements(selector string, opts ...field.PathOption) ([]Requirement, error) {
return parse(selector, field.ToPath(opts...))
}
// ValidatedSetSelector wraps a Set, allowing it to implement the Selector interface. Unlike
// Set.AsSelectorPreValidated (which copies the input Set), this type simply wraps the underlying
// Set. As a result, it is substantially more efficient. Nil and empty Sets are considered
// equivalent to Everything().
//
// Callers MUST ensure the underlying Set is not mutated, and that it is already validated. If these
// constraints are not met, Set.AsValidatedSelector should be preferred
//
// None of the Selector methods mutate the underlying Set, but Add() and Requirements() convert to
// the less optimized version.
type ValidatedSetSelector Set
func (s ValidatedSetSelector) Matches(labels Labels) bool {
for k, v := range s {
if !labels.Has(k) || v != labels.Get(k) {
return false
}
}
return true
}
func (s ValidatedSetSelector) Empty() bool {
return len(s) == 0
}
func (s ValidatedSetSelector) String() string {
keys := make([]string, 0, len(s))
for k := range s {
keys = append(keys, k)
}
// Ensure deterministic output
slices.Sort(keys)
b := strings.Builder{}
for i, key := range keys {
v := s[key]
b.Grow(len(key) + 2 + len(v))
if i != 0 {
b.WriteString(",")
}
b.WriteString(key)
b.WriteString("=")
b.WriteString(v)
}
return b.String()
}
func (s ValidatedSetSelector) Add(r ...Requirement) Selector {
return s.toFullSelector().Add(r...)
}
func (s ValidatedSetSelector) Requirements() (requirements Requirements, selectable bool) {
return s.toFullSelector().Requirements()
}
func (s ValidatedSetSelector) DeepCopySelector() Selector {
res := make(ValidatedSetSelector, len(s))
for k, v := range s {
res[k] = v
}
return res
}
func (s ValidatedSetSelector) RequiresExactMatch(label string) (value string, found bool) {
v, f := s[label]
return v, f
}
func (s ValidatedSetSelector) toFullSelector() Selector {
return SelectorFromValidatedSet(Set(s))
}
var _ Selector = ValidatedSetSelector{}
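// Illustrative sketch (not part of the original vendored code): wrapping an
// already validated Set without copying it, in contrast to the copying
// constructors above. The Set must not be mutated after wrapping.
func exampleValidatedSetSelector() {
	ls := Set{"app": "guestbook"}
	sel := ValidatedSetSelector(ls)
	_ = sel.Matches(Set{"app": "guestbook", "tier": "frontend"}) // true
	_ = sel.Matches(Set{"app": "redis"})                         // false
}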
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepcopy-gen. DO NOT EDIT.
package labels
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Requirement) DeepCopyInto(out *Requirement) {
*out = *in
if in.strValues != nil {
in, out := &in.strValues, &out.strValues
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Requirement.
func (in *Requirement) DeepCopy() *Requirement {
if in == nil {
return nil
}
out := new(Requirement)
in.DeepCopyInto(out)
return out
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package labels
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ByKey) DeepEqual(other *ByKey) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Lexer) DeepEqual(other *Lexer) bool {
if other == nil {
return false
}
if in.s != other.s {
return false
}
if in.pos != other.pos {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Parser) DeepEqual(other *Parser) bool {
if other == nil {
return false
}
if (in.l == nil) != (other.l == nil) {
return false
} else if in.l != nil {
if !in.l.DeepEqual(other.l) {
return false
}
}
if ((in.scannedItems != nil) && (other.scannedItems != nil)) || ((in.scannedItems == nil) != (other.scannedItems == nil)) {
in, other := &in.scannedItems, &other.scannedItems
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
}
if in.position != other.position {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Requirement) DeepEqual(other *Requirement) bool {
if other == nil {
return false
}
if in.key != other.key {
return false
}
if in.operator != other.operator {
return false
}
if ((in.strValues != nil) && (other.strValues != nil)) || ((in.strValues == nil) != (other.strValues == nil)) {
in, other := &in.strValues, &other.strValues
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if inElement != (*other)[i] {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Requirements) DeepEqual(other *Requirements) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ScannedItem) DeepEqual(other *ScannedItem) bool {
if other == nil {
return false
}
if in.tok != other.tok {
return false
}
if in.literal != other.literal {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Set) DeepEqual(other *Set) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
if inValue != otherValue {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *ValidatedSetSelector) DeepEqual(other *ValidatedSetSelector) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
if inValue != otherValue {
return false
}
}
}
}
return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package labels
import (
"bytes"
"sort"
"strings"
)
// LabelArray is an array of labels forming a set
type LabelArray []Label
// Sort is an internal utility to return all LabelArrays in sorted
// order, when the source material may be unsorted. 'ls' is sorted
// in-place, but also returns the sorted array for convenience.
func (ls LabelArray) Sort() LabelArray {
sort.Slice(ls, func(i, j int) bool {
return ls[i].Key < ls[j].Key
})
return ls
}
// ParseLabelArray parses a list of labels and returns a LabelArray
func ParseLabelArray(labels ...string) LabelArray {
array := make(LabelArray, len(labels))
for i := range labels {
array[i] = ParseLabel(labels[i])
}
return array.Sort()
}
// ParseSelectLabelArray parses a list of select labels and returns a LabelArray
func ParseSelectLabelArray(labels ...string) LabelArray {
array := make(LabelArray, len(labels))
for i := range labels {
array[i] = ParseSelectLabel(labels[i])
}
return array.Sort()
}
// ParseLabelArrayFromArray converts an array of strings as labels and returns a LabelArray
func ParseLabelArrayFromArray(base []string) LabelArray {
array := make(LabelArray, len(base))
for i := range base {
array[i] = ParseLabel(base[i])
}
return array.Sort()
}
// NewLabelArrayFromSortedList returns labels based on the output of SortedList()
// Trailing ';' will result in an empty key that must be filtered out.
func NewLabelArrayFromSortedList(list string) LabelArray {
base := strings.Split(list, ";")
array := make(LabelArray, 0, len(base))
for _, v := range base {
if lbl := ParseLabel(v); lbl.Key != "" {
array = append(array, lbl)
}
}
return array
}
// ParseSelectLabelArrayFromArray converts an array of strings as select labels and returns a LabelArray
func ParseSelectLabelArrayFromArray(base []string) LabelArray {
array := make(LabelArray, len(base))
for i := range base {
array[i] = ParseSelectLabel(base[i])
}
return array.Sort()
}
// Labels returns the LabelArray as Labels
func (ls LabelArray) Labels() Labels {
lbls := Labels{}
for _, l := range ls {
lbls[l.Key] = l
}
return lbls
}
// Contains returns true if ls contains all the labels in needed. If
// needed contains no labels, Contains() will always return true.
func (ls LabelArray) Contains(needed LabelArray) bool {
nextLabel:
for i := range needed {
for l := range ls {
if ls[l].Has(&needed[i]) {
continue nextLabel
}
}
return false
}
return true
}
// Intersects returns true if ls contains at least one label in needed.
//
// This has the same matching semantics as Has, namely,
// ["k8s:foo=bar"].Intersects(["any:foo=bar"]) == true
// ["any:foo=bar"].Intersects(["k8s:foo=bar"]) == false
func (ls LabelArray) Intersects(needed LabelArray) bool {
for _, l := range ls {
for _, n := range needed {
if l.Has(&n) {
return true
}
}
}
return false
}
// Lacks is identical to Contains but returns all missing labels
func (ls LabelArray) Lacks(needed LabelArray) LabelArray {
missing := LabelArray{}
nextLabel:
for i := range needed {
for l := range ls {
if ls[l].Has(&needed[i]) {
continue nextLabel
}
}
missing = append(missing, needed[i])
}
return missing
}
// Has returns whether the provided key exists in the label array.
// Implementation of the
// github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels.Labels interface.
//
// The key can be of source "any", in which case the source is
// ignored. The inverse, however, is not true.
// ["k8s.foo=bar"].Has("any.foo") => true
// ["any.foo=bar"].Has("k8s.foo") => false
//
// If the key is of source "cidr", this will also match
// broader keys.
// ["cidr:1.1.1.1/32"].Has("cidr.1.0.0.0/8") => true
// ["cidr:1.0.0.0/8"].Has("cidr.1.1.1.1/32") => false
func (ls LabelArray) Has(key string) bool {
// The key is submitted in the form of `source.key=value`
keyLabel := parseSelectLabel(key, '.')
for _, l := range ls {
if l.HasKey(&keyLabel) {
return true
}
}
return false
}
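// Illustrative sketch (not part of the original file): the "any" source on the
// queried key matches any stored source, mirroring the examples in the comment
// above. The label names are arbitrary.
func exampleLabelArrayHas() {
	ls := ParseLabelArray("k8s:foo=bar")
	_ = ls.Has("any.foo") // true: "any" ignores the stored source
	_ = ls.Has("k8s.foo") // true: sources match exactly
	_ = ls.Get("any.foo") // "bar"
}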
// Get returns the value for the provided key.
// Implementation of the
// github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels.Labels interface.
//
// The key can be of source "any", in which case the source is
// ignored. The inverse, however, is not true.
// ["k8s.foo=bar"].Get("any.foo") => "bar"
// ["any.foo=bar"].Get("k8s.foo") => ""
//
// If the key is of source "cidr", this will also match
// broader keys.
// ["cidr:1.1.1.1/32"].Has("cidr.1.0.0.0/8") => true
// ["cidr:1.0.0.0/8"].Has("cidr.1.1.1.1/32") => false
func (ls LabelArray) Get(key string) string {
keyLabel := parseSelectLabel(key, '.')
for _, l := range ls {
if l.HasKey(&keyLabel) {
return l.Value
}
}
return ""
}
// DeepCopy returns a deep copy of the labels.
func (ls LabelArray) DeepCopy() LabelArray {
if ls == nil {
return nil
}
o := make(LabelArray, len(ls))
copy(o, ls)
return o
}
// GetModel returns the LabelArray as a string array with fully-qualified labels.
// The output is parseable by ParseLabelArrayFromArray
func (ls LabelArray) GetModel() []string {
res := make([]string, 0, len(ls))
for l := range ls {
res = append(res, ls[l].String())
}
return res
}
func LabelArrayFromString(str string) LabelArray {
// each LabelArray starts with '[' and ends with ']'
if len(str) > 2 && str[0] == '[' && str[len(str)-1] == ']' {
str = str[1 : len(str)-1] // remove brackets
labels := strings.Split(str, " ")
la := make(LabelArray, 0, len(labels))
for j := range labels {
la = append(la, ParseLabel(labels[j]))
}
if len(la) > 0 {
return la
}
}
return nil
}
func (ls LabelArray) BuildString(sb *strings.Builder) {
sb.WriteString("[")
for l := range ls {
if l > 0 {
sb.WriteString(" ")
}
ls[l].BuildString(sb)
}
sb.WriteString("]")
}
func (ls LabelArray) String() string {
var sb strings.Builder
ls.BuildString(&sb)
return sb.String()
}
func (ls LabelArray) BuildBytes(buf *bytes.Buffer) {
buf.WriteString("[")
for l := range ls {
if l > 0 {
buf.WriteString(" ")
}
ls[l].BuildBytes(buf)
}
buf.WriteString("]")
}
// StringMap converts LabelArray into map[string]string
// Note: The source is included in the keys with a ':' separator.
// Note: LabelArray does not deduplicate entries, as it is an array. It is
// possible for the output to contain fewer entries when the source and key are
// repeated in a LabelArray, as that is the key of the output. This scenario is
// not expected.
func (ls LabelArray) StringMap() map[string]string {
o := map[string]string{}
for _, v := range ls {
o[v.Source+":"+v.Key] = v.Value
}
return o
}
// Equals returns true if the label arrays are the same, i.e., have the same labels in the same order.
func (ls LabelArray) Equals(b LabelArray) bool {
if len(ls) != len(b) {
return false
}
for l := range ls {
if !ls[l].Equals(&b[l]) {
return false
}
}
return true
}
// Less returns true if ls comes before b in the lexicographical order.
// Assumes both ls and b are already sorted.
func (ls LabelArray) Less(b LabelArray) bool {
lsLen, bLen := len(ls), len(b)
minLen := lsLen
if bLen < minLen {
minLen = bLen
}
for i := 0; i < minLen; i++ {
switch {
// Key
case ls[i].Key < b[i].Key:
return true
case ls[i].Key > b[i].Key:
return false
// Value
case ls[i].Value < b[i].Value:
return true
case ls[i].Value > b[i].Value:
return false
// Source
case ls[i].Source < b[i].Source:
return true
case ls[i].Source > b[i].Source:
return false
}
}
return lsLen < bLen
}
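// Illustrative sketch (not part of the original file): Less orders by key,
// then value, then source, and a shorter array sorts before a longer one that
// shares the same prefix.
func exampleLabelArrayLess() {
	a := ParseLabelArray("k8s:app=redis")
	b := ParseLabelArray("k8s:app=redis", "k8s:tier=cache")
	_ = a.Less(b) // true: equal prefix, a is shorter
	_ = b.Less(a) // false
}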
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package labels
import (
"bytes"
"iter"
"sort"
"strings"
)
// LabelArrayList is an array of LabelArrays. It is primarily intended as a
// simple collection
type LabelArrayList []LabelArray
// DeepCopy returns a deep copy of the LabelArray, with each element also copied.
func (ls LabelArrayList) DeepCopy() LabelArrayList {
if ls == nil {
return nil
}
o := make(LabelArrayList, 0, len(ls))
for _, v := range ls {
o = append(o, v.DeepCopy())
}
return o
}
// GetModel returns the LabelArrayList as a [][]string. Each member LabelArray
// becomes a []string.
func (ls LabelArrayList) GetModel() [][]string {
res := make([][]string, 0, len(ls))
for _, v := range ls {
res = append(res, v.GetModel())
}
return res
}
// Equals returns true if the label arrays lists have the same label arrays in the same order.
func (ls LabelArrayList) Equals(b LabelArrayList) bool {
if len(ls) != len(b) {
return false
}
for l := range ls {
if !ls[l].Equals(b[l]) {
return false
}
}
return true
}
// Diff returns the string of differences between 'ls' and 'expected' LabelArrayList with
// '+ ' or '- ' for obtaining something unexpected, or not obtaining the expected, respectively.
// For use in debugging. Assumes sorted LabelArrayLists.
func (ls LabelArrayList) Diff(expected LabelArrayList) (res string) {
res += ""
i := 0
j := 0
for i < len(ls) && j < len(expected) {
if ls[i].Equals(expected[j]) {
i++
j++
continue
}
if ls[i].Less(expected[j]) {
// obtained has an unexpected labelArray
res += " + " + ls[i].String() + "\n"
i++
}
for j < len(expected) && expected[j].Less(ls[i]) {
// expected has a missing labelArray
res += " - " + expected[j].String() + "\n"
j++
}
}
for i < len(ls) {
// obtained has an unexpected labelArray
res += " + " + ls[i].String() + "\n"
i++
}
for j < len(expected) {
// expected has a missing labelArray
res += " - " + expected[j].String() + "\n"
j++
}
return res
}
// String returns the LabelArrayList as a human-readable string, with the
// member LabelArrays separated by ", ".
func (ls LabelArrayList) String() string {
var sb strings.Builder
for i := range ls {
if i > 0 {
sb.WriteString(", ")
}
ls[i].BuildString(&sb)
}
return sb.String()
}
func LabelArrayListFromString(str string) (ls LabelArrayList) {
// each LabelArray starts with '[' and ends with ']'
if len(str) > 2 && str[0] == '[' && str[len(str)-1] == ']' {
str = str[1 : len(str)-1] // remove first and last bracket
arrays := strings.Split(str, "], [")
for i := range arrays {
labels := strings.Split(arrays[i], " ")
var la LabelArray
for j := range labels {
la = append(la, ParseLabel(labels[j]))
}
ls = append(ls, la)
}
}
return ls
}
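// exampleLabelArrayListFromString is an illustrative sketch (not part of the
// original source): the parser expects the same "[...], [...]" layout that
// LabelArrayList.String produces. The input string is an assumption for
// demonstration only.
func exampleLabelArrayListFromString() LabelArrayList {
    return LabelArrayListFromString("[k8s:app=foo k8s:env=prod], [reserved:host]")
}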
func ModelsFromLabelArrayListString(str string) iter.Seq[[]string] {
return func(yield func(labelArray []string) bool) {
// each LabelArray starts with '[' and ends with ']'
if len(str) > 2 && str[0] == '[' && str[len(str)-1] == ']' {
str = str[1 : len(str)-1] // remove first and last bracket
for {
i := strings.Index(str, "], [")
if i < 0 {
break
}
if !yield(strings.Split(str[:i], " ")) {
return
}
str = str[i+4:]
}
// last label array
yield(strings.Split(str, " "))
}
}
}
func (ls LabelArrayList) BuildBytes(buf *bytes.Buffer) {
for l, v := range ls {
if l > 0 {
buf.WriteString(", ")
}
v.BuildBytes(buf)
}
}
// Sort sorts the LabelArrayList in-place, but also returns the sorted list
// for convenience. The LabelArrays themselves must already be sorted. This is
// true for all constructors of LabelArray.
func (ls LabelArrayList) Sort() LabelArrayList {
sort.Slice(ls, func(i, j int) bool {
return ls[i].Less(ls[j])
})
return ls
}
// Merge incorporates new LabelArrays into an existing LabelArrayList, without
// introducing duplicates, returning the result for convenience. Existing
// duplication in either list is not removed.
func (lsp *LabelArrayList) Merge(include ...LabelArray) LabelArrayList {
lsp.Sort()
incl := LabelArrayList(include).Sort()
return lsp.MergeSorted(incl)
}
// MergeSorted incorporates new labels from 'include' to the receiver,
// both of which must be already sorted.
// LabelArrays are inserted from 'include' to the receiver as needed.
func (lsp *LabelArrayList) MergeSorted(include LabelArrayList) LabelArrayList {
merged := *lsp
i := 0
for j := 0; i < len(include) && j < len(merged); j++ {
if include[i].Less(merged[j]) {
merged = append(merged[:j+1], merged[j:]...) // make space at merged[j]
merged[j] = include[i]
i++
} else if include[i].Equals(merged[j]) {
i++
}
}
// 'include' may have more entries after original labels have been exhausted
if i < len(include) {
merged = append(merged, include[i:]...)
}
*lsp = merged
return *lsp
}
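// exampleLabelArrayListMerge is an illustrative sketch (not part of the
// original source) of Merge: both the receiver and the included LabelArrays
// are sorted, and a LabelArray already present in the receiver is not added
// again. The label strings are assumptions for demonstration only.
func exampleLabelArrayListMerge() LabelArrayList {
    base := LabelArrayList{LabelArray{ParseLabel("k8s:env=prod")}}
    return base.Merge(
        LabelArray{ParseLabel("k8s:app=foo")},
        LabelArray{ParseLabel("k8s:env=prod")}, // duplicate, not re-added
    )
}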
func nextArray(str string, end int) (int, int) {
start := strings.IndexByte(str[end:], '[')
if start >= 0 {
start += end
end = strings.IndexByte(str[start:], ']')
if end >= 0 {
end += start + 1
}
}
return start, end
}
func writeRemainder(str string, start, end int, sb *strings.Builder) {
if start >= 0 && start < end {
if sb.Len() > 0 {
sb.WriteString(", ")
}
sb.WriteString(str[start:])
}
}
// merge 'b' to 'a' assuming both are sorted
func MergeSortedLabelArrayListStrings(a, b string) string {
var sb strings.Builder
var aStart, aEnd, bStart, bEnd int
Loop:
for {
// get the next label array on 'a'
aStart, aEnd = nextArray(a, aEnd)
if aStart < 0 || aEnd < 0 || aStart >= aEnd {
// no more label arrays in a, concat the rest of 'b'
// next item from 'b' has not been parsed yet
bStart, bEnd = nextArray(b, bEnd)
writeRemainder(b, bStart, bEnd, &sb)
break
}
// get the next label array on 'b'
bStart, bEnd = nextArray(b, bEnd)
if bStart < 0 || bEnd < 0 || bStart >= bEnd {
// no more label arrays in b, concat the rest of 'a'
writeRemainder(a, aStart, aEnd, &sb)
break
}
// Add lesser label arrays from 'a'
for a[aStart:aEnd] < b[bStart:bEnd] {
if sb.Len() > 0 {
sb.WriteString(", ")
}
sb.WriteString(a[aStart:aEnd])
// get the next label array on 'a'
aStart, aEnd = nextArray(a, aEnd)
if aStart < 0 || aEnd < 0 || aStart >= aEnd {
// no more label arrays in 'a', concat the rest of 'b'
writeRemainder(b, bStart, bEnd, &sb)
break Loop
}
}
// Add lesser values from 'b'
for a[aStart:aEnd] > b[bStart:bEnd] {
if sb.Len() > 0 {
sb.WriteString(", ")
}
sb.WriteString(b[bStart:bEnd])
// get the next label array on 'b'
bStart, bEnd = nextArray(b, bEnd)
if bStart < 0 || bEnd < 0 || bStart >= bEnd {
// no more label arrays in 'b', concat the rest of 'a'
writeRemainder(a, aStart, aEnd, &sb)
break Loop
}
}
if a[aStart:aEnd] == b[bStart:bEnd] {
if sb.Len() > 0 {
sb.WriteString(", ")
}
sb.WriteString(b[bStart:bEnd])
}
}
return sb.String()
}
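// exampleMergeSortedLabelArrayListStrings is an illustrative sketch (not part
// of the original source): both inputs must already be sorted and use the
// "[...], [...]" form produced by LabelArrayList.String; duplicates appear
// only once in the output. The inputs are assumptions for demonstration only.
func exampleMergeSortedLabelArrayListStrings() string {
    return MergeSortedLabelArrayListStrings(
        "[k8s:app=foo], [k8s:env=prod]",
        "[k8s:env=prod], [k8s:team=net]",
    ) // "[k8s:app=foo], [k8s:env=prod], [k8s:team=net]"
}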
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package labels
import (
"fmt"
"net/netip"
"strconv"
"strings"
"github.com/cilium/cilium/pkg/option"
)
var (
worldLabelNonDualStack = Label{Source: LabelSourceReserved, Key: IDNameWorld}
worldLabelV4 = Label{Source: LabelSourceReserved, Key: IDNameWorldIPv4}
worldLabelV6 = Label{Source: LabelSourceReserved, Key: IDNameWorldIPv6}
)
// maskedIPToLabel is the base method for serializing an IP + prefix into
// a string that can be used for creating Labels and EndpointSelector objects.
//
// For IPv6 addresses, it converts ":" into "-" as EndpointSelectors don't
// support colons inside the name section of a label.
func maskedIPToLabel(ipStr string, prefix int) Label {
var str strings.Builder
str.Grow(
1 /* preZero */ +
len(ipStr) +
1 /* postZero */ +
2 /*len of prefix*/ +
1, /* '/' */
)
for i := 0; i < len(ipStr); i++ {
if ipStr[i] == ':' {
// EndpointSelector keys can't start or end with a "-", so insert a
// zero at the start or end if it would otherwise have a "-" at that
// position.
if i == 0 {
str.WriteByte('0')
str.WriteByte('-')
continue
}
if i == len(ipStr)-1 {
str.WriteByte('-')
str.WriteByte('0')
continue
}
str.WriteByte('-')
} else {
str.WriteByte(ipStr[i])
}
}
str.WriteRune('/')
str.WriteString(strconv.Itoa(prefix))
return Label{Key: str.String(), Source: LabelSourceCIDR}
}
// IPStringToLabel parses a string and returns it as a CIDR label.
//
// If ip is not a valid IP address or CIDR Prefix, returns an error.
func IPStringToLabel(ip string) (Label, error) {
// factored out of netip.ParsePrefix to avoid allocating an empty netip.Prefix in case it's
// an IP and not a CIDR.
i := strings.LastIndexByte(ip, '/')
if i < 0 {
parsedIP, err := netip.ParseAddr(ip)
if err != nil {
return Label{}, fmt.Errorf("%q is not an IP address: %w", ip, err)
}
return maskedIPToLabel(ip, parsedIP.BitLen()), nil
} else {
parsedPrefix, err := netip.ParsePrefix(ip)
if err != nil {
return Label{}, fmt.Errorf("%q is not a CIDR: %w", ip, err)
}
return maskedIPToLabel(parsedPrefix.Masked().Addr().String(), parsedPrefix.Bits()), nil
}
}
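// exampleIPStringToLabel is an illustrative sketch (not part of the original
// source): a bare IP gets a full-length prefix, and IPv6 colons are rewritten
// to '-' in the label key. The addresses are assumptions for demonstration.
func exampleIPStringToLabel() (Label, Label) {
    v4, _ := IPStringToLabel("10.0.0.0/8") // Key "10.0.0.0/8", Source "cidr"
    v6, _ := IPStringToLabel("f00d::1")    // Key "f00d--1/128", Source "cidr"
    return v4, v6
}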
// GetCIDRLabels turns a CIDR into a specially formatted label, and returns
// a Labels including the CIDR-specific label and the appropriate world label.
// e.g. "10.0.0.0/8" => ["cidr:10.0.0.0/8", "reserved:world-ipv4"]
//
// IPv6 requires some special treatment, since ":" is special in the label selector
// grammar. For example, "::/0" becomes "cidr:0--0/0".
func GetCIDRLabels(prefix netip.Prefix) Labels {
lbls := make(Labels, 2)
if prefix.Bits() > 0 {
l := maskedIPToLabel(prefix.Addr().String(), prefix.Bits())
l.cidr = &prefix
lbls[l.Key] = l
}
lbls.AddWorldLabel(prefix.Addr())
return lbls
}
func (lbls Labels) AddWorldLabel(addr netip.Addr) {
switch {
case !option.Config.IsDualStack():
lbls[worldLabelNonDualStack.Key] = worldLabelNonDualStack
case addr.Is4():
lbls[worldLabelV4.Key] = worldLabelV4
default:
lbls[worldLabelV6.Key] = worldLabelV6
}
}
func LabelToPrefix(key string) (netip.Prefix, error) {
prefixStr := strings.Replace(key, "-", ":", -1)
pfx, err := netip.ParsePrefix(prefixStr)
if err != nil {
return netip.Prefix{}, fmt.Errorf("failed to parse label prefix %s: %w", key, err)
}
return pfx, nil
}
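// exampleLabelToPrefix is an illustrative sketch (not part of the original
// source): it reverses the '-' for ':' substitution done by maskedIPToLabel.
// The key "0--0/0" (the encoding of "::/0") is an assumption for demonstration.
func exampleLabelToPrefix() (netip.Prefix, error) {
    return LabelToPrefix("0--0/0") // the prefix for "::/0", nil
}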
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package labels
import (
"bytes"
"encoding/json"
"fmt"
"net/netip"
"slices"
"strings"
"github.com/sirupsen/logrus"
"github.com/cilium/cilium/pkg/container/cache"
"github.com/cilium/cilium/pkg/logging/logfields"
)
const (
// PathDelimiter is the delimiter used in the labels paths.
PathDelimiter = "."
// IDNameHost is the label used for the hostname ID.
IDNameHost = "host"
// IDNameRemoteNode is the label used to describe the
// ReservedIdentityRemoteNode
IDNameRemoteNode = "remote-node"
// IDNameWorld is the label used for the world ID.
IDNameWorld = "world"
// IDNameWorldIPv4 is the label used for the world-ipv4 ID, to distinguish
// it from world-ipv6 in dual-stack mode.
IDNameWorldIPv4 = "world-ipv4"
// IDNameWorldIPv6 is the label used for the world-ipv6 ID, to distinguish
// it from world-ipv4 in dual-stack mode.
IDNameWorldIPv6 = "world-ipv6"
// IDNameCluster is the label used to identify an unspecified endpoint
// inside the cluster
IDNameCluster = "cluster"
// IDNameHealth is the label used for the local cilium-health endpoint
IDNameHealth = "health"
// IDNameInit is the label used to identify any endpoint that has not
// received any labels yet.
IDNameInit = "init"
// IDNameKubeAPIServer is the label used to identify the kube-apiserver. It
// is part of the reserved identity 7 and it is also used in conjunction
// with IDNameHost if the kube-apiserver is running on the local host.
IDNameKubeAPIServer = "kube-apiserver"
// IDNameEncryptedOverlay is the label used to identify encrypted overlay
// traffic.
//
// It is part of the reserved identity 11 and signals that overlay traffic
// with this identity must be IPSec encrypted before leaving the host.
//
// This identity should never be seen on the wire and is used only on the
// local host.
IDNameEncryptedOverlay = "overlay-to-encrypt"
// IDNameIngress is the label used to identify Ingress proxies. It is part
// of the reserved identity 8.
IDNameIngress = "ingress"
// IDNameNone is the label used to identify no endpoint or other L3 entity.
// It will never be assigned and this "label" is here for consistency with
// other Entities.
IDNameNone = "none"
// IDNameUnmanaged is the label used to identify unmanaged endpoints
IDNameUnmanaged = "unmanaged"
// IDNameUnknown is the label used to identify an endpoint with an
// unknown identity.
IDNameUnknown = "unknown"
)
var (
// LabelHealth is the label used for health.
LabelHealth = Labels{IDNameHealth: NewLabel(IDNameHealth, "", LabelSourceReserved)}
// LabelHost is the label used for the host endpoint.
LabelHost = Labels{IDNameHost: NewLabel(IDNameHost, "", LabelSourceReserved)}
// LabelWorld is the label used for world.
LabelWorld = Labels{IDNameWorld: NewLabel(IDNameWorld, "", LabelSourceReserved)}
// LabelWorldIPv4 is the label used for world-ipv4.
LabelWorldIPv4 = Labels{IDNameWorldIPv4: NewLabel(IDNameWorldIPv4, "", LabelSourceReserved)}
// LabelWorldIPv6 is the label used for world-ipv6.
LabelWorldIPv6 = Labels{IDNameWorldIPv6: NewLabel(IDNameWorldIPv6, "", LabelSourceReserved)}
// LabelRemoteNode is the label used for remote nodes.
LabelRemoteNode = Labels{IDNameRemoteNode: NewLabel(IDNameRemoteNode, "", LabelSourceReserved)}
// LabelKubeAPIServer is the label used for the kube-apiserver. See comment
// on IDNameKubeAPIServer.
LabelKubeAPIServer = Labels{IDNameKubeAPIServer: NewLabel(IDNameKubeAPIServer, "", LabelSourceReserved)}
LabelKubeAPIServerExt = Labels{
IDNameKubeAPIServer: NewLabel(IDNameKubeAPIServer, "", LabelSourceReserved),
IDNameWorld: NewLabel(IDNameWorld, "", LabelSourceReserved),
}
// LabelIngress is the label used for Ingress proxies. See comment
// on IDNameIngress.
LabelIngress = Labels{IDNameIngress: NewLabel(IDNameIngress, "", LabelSourceReserved)}
// LabelKeyFixedIdentity is the label that can be used to define a fixed
// identity.
LabelKeyFixedIdentity = "io.cilium.fixed-identity"
)
const (
// LabelSourceUnspec is a label with unspecified source
LabelSourceUnspec = "unspec"
// LabelSourceAny is a label that matches any source
LabelSourceAny = "any"
// LabelSourceAnyKeyPrefix is prefix of a "any" label
LabelSourceAnyKeyPrefix = LabelSourceAny + "."
// LabelSourceK8s is a label imported from Kubernetes
LabelSourceK8s = "k8s"
// LabelSourceK8sKeyPrefix is prefix of a Kubernetes label
LabelSourceK8sKeyPrefix = LabelSourceK8s + "."
// LabelSourceContainer is a label imported from the container runtime
LabelSourceContainer = "container"
// LabelSourceCNI is a label imported from the CNI plugin
LabelSourceCNI = "cni"
// LabelSourceReserved is the label source for reserved types.
LabelSourceReserved = "reserved"
// LabelSourceCIDR is the label source for generated CIDRs.
LabelSourceCIDR = "cidr"
// LabelSourceCIDRGroup is the label source used for labels from CIDRGroups
LabelSourceCIDRGroup = "cidrgroup"
// LabelSourceCIDRGroupKeyPrefix is the source as a k8s selector key prefix
LabelSourceCIDRGroupKeyPrefix = LabelSourceCIDRGroup + "."
// LabelSourceNode is the label source for remote-nodes.
LabelSourceNode = "node"
// LabelSourceFQDN is the label source for IPs resolved by fqdn lookups
LabelSourceFQDN = "fqdn"
// LabelSourceReservedKeyPrefix is the prefix of a reserved label
LabelSourceReservedKeyPrefix = LabelSourceReserved + "."
// LabelSourceDirectory is the label source for policies read from files
LabelSourceDirectory = "directory"
)
// Label is the Cilium's representation of a container label.
type Label struct {
Key string `json:"key"`
Value string `json:"value,omitempty"`
// Source can be one of the above values (e.g.: LabelSourceContainer).
//
// +kubebuilder:validation:Optional
Source string `json:"source"`
// optimization for CIDR prefixes
// +deepequal-gen=false
cidr *netip.Prefix `json:"-"`
}
// Labels is a map of labels where the map's key is the same as the label's key.
type Labels map[string]Label
//
// Convenience functions to use instead of Has(), which iterates through the labels
//
// HasLabelWithKey returns true if lbls has a label with 'key'
func (l Labels) HasLabelWithKey(key string) bool {
_, ok := l[key]
return ok
}
func (l Labels) HasFixedIdentityLabel() bool {
return l.HasLabelWithKey(LabelKeyFixedIdentity)
}
func (l Labels) HasInitLabel() bool {
return l.HasLabelWithKey(IDNameInit)
}
func (l Labels) HasHealthLabel() bool {
return l.HasLabelWithKey(IDNameHealth)
}
func (l Labels) HasIngressLabel() bool {
return l.HasLabelWithKey(IDNameIngress)
}
func (l Labels) HasHostLabel() bool {
return l.HasLabelWithKey(IDNameHost)
}
func (l Labels) HasKubeAPIServerLabel() bool {
return l.HasLabelWithKey(IDNameKubeAPIServer)
}
func (l Labels) HasRemoteNodeLabel() bool {
return l.HasLabelWithKey(IDNameRemoteNode)
}
func (l Labels) HasWorldIPv6Label() bool {
return l.HasLabelWithKey(IDNameWorldIPv6)
}
func (l Labels) HasWorldIPv4Label() bool {
return l.HasLabelWithKey(IDNameWorldIPv4)
}
func (l Labels) HasNonDualstackWorldLabel() bool {
return l.HasLabelWithKey(IDNameWorld)
}
func (l Labels) HasWorldLabel() bool {
return l.HasNonDualstackWorldLabel() || l.HasWorldIPv4Label() || l.HasWorldIPv6Label()
}
// GetPrintableModel turns the Labels into a sorted list of strings
// representing the labels.
func (l Labels) GetPrintableModel() (res []string) {
res = make([]string, 0, len(l))
for _, v := range l {
if v.Source == LabelSourceCIDR {
prefix, err := LabelToPrefix(v.Key)
if err != nil {
res = append(res, v.String())
} else {
res = append(res, LabelSourceCIDR+":"+prefix.String())
}
} else {
// not a CIDR label, no magic needed
res = append(res, v.String())
}
}
slices.Sort(res)
return res
}
// String returns the map of labels as human readable string
func (l Labels) String() string {
return strings.Join(l.GetPrintableModel(), ",")
}
// Equals returns true if the two Labels contain the same set of labels.
func (l Labels) Equals(other Labels) bool {
if len(l) != len(other) {
return false
}
for k, lbl1 := range l {
if lbl2, ok := other[k]; ok {
if lbl1.Source == lbl2.Source && lbl1.Key == lbl2.Key && lbl1.Value == lbl2.Value {
continue
}
}
return false
}
return true
}
// GetFromSource returns all labels that are from the given source.
func (l Labels) GetFromSource(source string) Labels {
lbls := Labels{}
for k, v := range l {
if v.Source == source {
lbls[k] = v
}
}
return lbls
}
// RemoveFromSource removes all labels that are from the given source
func (l Labels) RemoveFromSource(source string) Labels {
lbls := Labels{}
for k, v := range l {
if v.Source != source {
lbls[k] = v
}
}
return lbls
}
// NewLabel returns a new label from the given key, value and source. If source is empty,
// the default value will be LabelSourceUnspec. If key starts with '$', the source
// will be overwritten with LabelSourceReserved. If key contains ':', the value
// before ':' will be used as source if given source is empty, otherwise the value before
// ':' will be deleted and unused.
func NewLabel(key string, value string, source string) Label {
var src string
src, key = parseSource(key, ':')
if source == "" {
if src == "" {
source = LabelSourceUnspec
} else {
source = src
}
}
if src == LabelSourceReserved && key == "" {
key = value
value = ""
}
l := Label{
Key: cache.Strings.Get(key),
Value: cache.Strings.Get(value),
Source: cache.Strings.Get(source),
}
if l.Source == LabelSourceCIDR {
c, err := LabelToPrefix(l.Key)
if err != nil {
logrus.WithField("key", l.Key).WithError(err).Error("Failed to parse CIDR label: invalid prefix.")
} else {
l.cidr = &c
}
}
return l
}
// Equals returns true if source, Key and Value are equal and false otherwise.
func (l *Label) Equals(b *Label) bool {
if !l.IsAnySource() && l.Source != b.Source {
return false
}
return l.Key == b.Key && l.Value == b.Value
}
// IsAnySource return if the label was set with source "any".
func (l *Label) IsAnySource() bool {
return l.Source == LabelSourceAny
}
// IsReservedSource return if the label was set with source "Reserved".
func (l *Label) IsReservedSource() bool {
return l.Source == LabelSourceReserved
}
// Has returns true if the label l contains the target.
// target may be "looser" w.r.t source or cidr, i.e.
// "k8s:foo=bar".Has("any:foo=bar") is true
// "any:foo=bar".Has("k8s:foo=bar") is false
// "cidr:10.0.0.1/32".Has("cidr:10.0.0.0/24") is true
func (l *Label) Has(target *Label) bool {
return l.HasKey(target) && l.Value == target.Value
}
// HasKey returns true if l has target's key.
// target may be "looser" w.r.t source or cidr, i.e.
// "k8s:foo=bar".HasKey("any:foo") is true
// "any:foo=bar".HasKey("k8s:foo") is false
// "cidr:10.0.0.1/32".HasKey("cidr:10.0.0.0/24") is true
// "cidr:10.0.0.0/24".HasKey("cidr:10.0.0.1/32") is false
func (l *Label) HasKey(target *Label) bool {
if !target.IsAnySource() && l.Source != target.Source {
return false
}
// Do cidr-aware matching if both sources are "cidr".
if target.Source == LabelSourceCIDR && l.Source == LabelSourceCIDR {
tc := target.cidr
if tc == nil {
v, err := LabelToPrefix(target.Key)
if err == nil {
tc = &v
}
}
lc := l.cidr
if lc == nil {
v, err := LabelToPrefix(l.Key)
if err == nil {
lc = &v
}
}
if tc != nil && lc != nil && tc.Bits() <= lc.Bits() && tc.Contains(lc.Addr()) {
return true
}
}
return l.Key == target.Key
}
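// exampleLabelHasKey is an illustrative sketch (not part of the original
// source) of the cidr-aware matching described above: a broader cidr target
// covers a more specific cidr label. The prefixes are assumptions for
// demonstration only.
func exampleLabelHasKey() bool {
    specific := ParseLabel("cidr:10.0.0.1/32")
    broader := ParseLabel("cidr:10.0.0.0/24")
    return specific.HasKey(&broader) // true: the /24 target contains the /32
}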
// String returns the string representation of Label in the form of Source:Key=Value or
// Source:Key if Value is empty.
func (l *Label) String() string {
if len(l.Value) != 0 {
return l.Source + ":" + l.Key + "=" + l.Value
}
return l.Source + ":" + l.Key
}
func (l *Label) BuildString(sb *strings.Builder) {
sb.WriteString(l.Source)
sb.WriteString(":")
sb.WriteString(l.Key)
if len(l.Value) != 0 {
sb.WriteString("=")
sb.WriteString(l.Value)
}
}
func (l *Label) BuildBytes(buf *bytes.Buffer) {
buf.WriteString(l.Source)
buf.WriteString(":")
buf.WriteString(l.Key)
if len(l.Value) != 0 {
buf.WriteString("=")
buf.WriteString(l.Value)
}
}
// IsValid returns true if Key != "".
func (l *Label) IsValid() bool {
return l.Key != ""
}
// UnmarshalJSON decodes a Label from its full JSON object form, falling back
// to the short string form [SOURCE:]KEY[=VALUE].
func (l *Label) UnmarshalJSON(data []byte) error {
if l == nil {
return fmt.Errorf("cannot unmarshal to nil pointer")
}
if len(data) == 0 {
return fmt.Errorf("invalid Label: empty data")
}
var aux struct {
Source string `json:"source"`
Key string `json:"key"`
Value string `json:"value,omitempty"`
}
err := json.Unmarshal(data, &aux)
if err != nil {
// If parsing of the full representation failed then try the short
// form in the format:
//
// [SOURCE:]KEY[=VALUE]
var aux string
if err := json.Unmarshal(data, &aux); err != nil {
return fmt.Errorf("decode of Label as string failed: %w", err)
}
if aux == "" {
return fmt.Errorf("invalid Label: Failed to parse %s as a string", data)
}
*l = ParseLabel(aux)
} else {
if aux.Key == "" {
return fmt.Errorf("invalid Label: '%s' does not contain label key", data)
}
l.Source = aux.Source
l.Key = aux.Key
l.Value = aux.Value
}
if l.Source == LabelSourceCIDR {
c, err := LabelToPrefix(l.Key)
if err == nil {
l.cidr = &c
} else {
logrus.WithField("key", l.Key).WithError(err).Error("Failed to parse CIDR label: invalid prefix.")
}
}
return nil
}
// GetExtendedKey returns the key of a label with the source encoded.
func (l *Label) GetExtendedKey() string {
return l.Source + PathDelimiter + l.Key
}
// GetCiliumKeyFrom returns the label's source and key from an extended key
// in the format SOURCE:KEY.
func GetCiliumKeyFrom(extKey string) string {
i := strings.IndexByte(extKey, PathDelimiter[0])
if i >= 0 {
return extKey[:i] + ":" + extKey[i+1:]
}
return LabelSourceAny + ":" + extKey
}
// GetExtendedKeyFrom returns the extended key of a label string.
// For example:
// `k8s:foo=bar` returns `k8s.foo`
// `container:foo=bar` returns `container.foo`
// `foo=bar` returns `any.foo`
func GetExtendedKeyFrom(str string) string {
src, next := parseSource(str, ':')
if src == "" {
src = LabelSourceAny
}
// Remove any value
i := strings.IndexByte(next, '=')
if i >= 0 {
return src + PathDelimiter + next[:i]
}
return src + PathDelimiter + next
}
// Map2Labels transforms a map in the form map[key(string)]value(string) into
// Labels. The source argument overrides any source encoded in the keys of the
// given map.
// Example:
// l := Map2Labels(map[string]string{"k8s:foo": "bar"}, "cilium")
// fmt.Printf("%+v\n", l)
//
// map[string]Label{"foo":Label{Key:"foo", Value:"bar", Source:"cilium"}}
func Map2Labels(m map[string]string, source string) Labels {
o := make(Labels, len(m))
for k, v := range m {
l := NewLabel(k, v, source)
o[l.Key] = l
}
return o
}
// StringMap converts Labels into map[string]string
func (l Labels) StringMap() map[string]string {
o := make(map[string]string, len(l))
for _, v := range l {
o[v.Source+":"+v.Key] = v.Value
}
return o
}
// K8sStringMap converts Labels into a Kubernetes-compatible map[string]string
func (l Labels) K8sStringMap() map[string]string {
o := make(map[string]string, len(l))
for _, v := range l {
if v.Source == LabelSourceK8s || v.Source == LabelSourceAny || v.Source == LabelSourceUnspec {
o[v.Key] = v.Value
} else {
o[v.Source+"."+v.Key] = v.Value
}
}
return o
}
// NewLabelsFromModel creates labels from string array.
func NewLabelsFromModel(base []string) Labels {
lbls := make(Labels, len(base))
for _, v := range base {
if lbl := ParseLabel(v); lbl.Key != "" {
lbls[lbl.Key] = lbl
}
}
return lbls
}
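// exampleNewLabelsFromModel is an illustrative sketch (not part of the
// original source): "$host" is the short form of the reserved "host" label,
// while "k8s:app=frontend" is a fully qualified label string. Both values are
// assumptions for demonstration only.
func exampleNewLabelsFromModel() Labels {
    return NewLabelsFromModel([]string{"k8s:app=frontend", "$host"})
}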
// FromSlice creates labels from a slice of labels.
func FromSlice(labels []Label) Labels {
lbls := make(Labels, len(labels))
for _, lbl := range labels {
lbls[lbl.Key] = lbl
}
return lbls
}
// NewLabelsFromSortedList returns labels based on the output of SortedList()
func NewLabelsFromSortedList(list string) Labels {
return NewLabelsFromModel(strings.Split(list, ";"))
}
// NewSelectLabelArrayFromModel parses a slice of strings and converts them
// into an array of selecting labels, sorted by the key.
func NewSelectLabelArrayFromModel(base []string) LabelArray {
lbls := make(LabelArray, 0, len(base))
for i := range base {
lbls = append(lbls, ParseSelectLabel(base[i]))
}
return lbls.Sort()
}
// NewFrom creates a new Labels from the given labels by creating a copy.
func NewFrom(l Labels) Labels {
nl := make(Labels, len(l))
nl.MergeLabels(l)
return nl
}
// GetModel returns model with all the values of the labels.
func (l Labels) GetModel() []string {
res := make([]string, 0, len(l))
for _, v := range l {
res = append(res, v.String())
}
return res
}
// MergeLabels merges the labels in 'from' into the receiver, overwriting any
// existing label with the same key.
// Example:
// to := Labels{Label{key1, value1, source1}, Label{key2, value3, source4}}
// from := Labels{Label{key1, value3, source4}}
// to.MergeLabels(from)
// fmt.Printf("%+v\n", to)
//
// Labels{Label{key1, value3, source4}, Label{key2, value3, source4}}
func (l Labels) MergeLabels(from Labels) {
for k, v := range from {
l[k] = v
}
}
// Remove returns a new Labels object with the specified Labels removed from
// the receiver. The receiver itself is not modified.
func (l Labels) Remove(from Labels) Labels {
result := make(Labels, len(l))
for k, v := range l {
if _, exists := from[k]; !exists {
result[k] = v
}
}
return result
}
// FormatForKVStore returns the label as a formatted string, ending in
// a semicolon
//
// DO NOT BREAK THE FORMAT OF THIS. THE RETURNED STRING IS USED AS
// PART OF THE KEY IN THE KEY-VALUE STORE.
//
// Non-pointer receiver allows this to be called on a value in a map.
func (l Label) FormatForKVStore() []byte {
// We don't care if the values already have a '='.
//
// We absolutely care that the final character is a semi-colon.
// Identity allocation in the kvstore depends on this (see
// kvstore.prefixMatchesKey())
b := make([]byte, 0, len(l.Source)+len(l.Key)+len(l.Value)+3)
buf := bytes.NewBuffer(b)
l.formatForKVStoreInto(buf)
return buf.Bytes()
}
// formatForKVStoreInto writes the label as a formatted string, ending in
// a semicolon into buf.
//
// DO NOT BREAK THE FORMAT OF THIS. THE RETURNED STRING IS USED AS
// PART OF THE KEY IN THE KEY-VALUE STORE.
//
// Non-pointer receiver allows this to be called on a value in a map.
func (l Label) formatForKVStoreInto(buf *bytes.Buffer) {
buf.WriteString(l.Source)
buf.WriteRune(':')
buf.WriteString(l.Key)
buf.WriteRune('=')
buf.WriteString(l.Value)
buf.WriteRune(';')
}
// SortedList returns the labels as a sorted list, separated by semicolon
//
// DO NOT BREAK THE FORMAT OF THIS. THE RETURNED STRING IS USED AS KEY IN
// THE KEY-VALUE STORE.
func (l Labels) SortedList() []byte {
keys := make([]string, 0, len(l))
for k := range l {
keys = append(keys, k)
}
slices.Sort(keys)
// Labels can have arbitrary size. However, when many CIDR identities are in
// the system, for example due to a FQDN policy matching S3, CIDR labels
// dominate in number. IPv4 CIDR labels in serialized form are max 25 bytes
// long. Allocate slightly more to avoid a realloc if some other labels are
// longer, since the cost of allocating a few extra bytes is dominated by the
// cost of a second allocation, especially since these allocations are
// short-lived.
//
// cidr:123.123.123.123/32=;
// 0 1 2
// 1234567890123456789012345
b := make([]byte, 0, len(keys)*30)
buf := bytes.NewBuffer(b)
for _, k := range keys {
l[k].formatForKVStoreInto(buf)
}
return buf.Bytes()
}
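// exampleSortedList is an illustrative sketch (not part of the original
// source) of the kvstore serialization format: every label is rendered as
// "source:key=value;" and keys are sorted. The labels are assumptions for
// demonstration only.
func exampleSortedList() []byte {
    lbls := NewLabelsFromModel([]string{"k8s:app=foo", "reserved:host"})
    return lbls.SortedList() // "k8s:app=foo;reserved:host=;"
}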
// ToSlice returns a slice of labels with the values of the given
// Labels map, sorted by the key.
func (l Labels) ToSlice() []Label {
return l.LabelArray()
}
// LabelArray returns the labels as label array, sorted by the key.
func (l Labels) LabelArray() LabelArray {
labels := make(LabelArray, 0, len(l))
for _, v := range l {
labels = append(labels, v)
}
return labels.Sort()
}
// FindReserved locates all labels with reserved source in the labels and
// returns a copy of them. If there are no reserved labels, returns nil.
// TODO: return LabelArray as it is likely faster
func (l Labels) FindReserved() Labels {
lbls := Labels{}
for k, lbl := range l {
if lbl.Source == LabelSourceReserved {
lbls[k] = lbl
}
}
if len(lbls) > 0 {
return lbls
}
return nil
}
// IsReserved returns true if any of the labels has a reserved source.
func (l Labels) IsReserved() bool {
return l.HasSource(LabelSourceReserved)
}
// Has returns true if l contains the given label.
func (l Labels) Has(label Label) bool {
for _, lbl := range l {
if lbl.Has(&label) {
return true
}
}
return false
}
// HasSource returns true if l contains the given label source.
func (l Labels) HasSource(source string) bool {
for _, lbl := range l {
if lbl.Source == source {
return true
}
}
return false
}
// CollectSources returns all distinct label sources found in l
func (l Labels) CollectSources() map[string]struct{} {
sources := make(map[string]struct{})
for _, lbl := range l {
sources[lbl.Source] = struct{}{}
}
return sources
}
// parseSource returns the parsed source of the given str. It also returns the next piece
// of text that is after the source.
// Example:
//
// src, next := parseSource("foo:bar==value", ':')
//
// Println(src) // foo
// Println(next) // bar==value
// For Cilium format 'delim' must be passed in as ':'
// For k8s format 'delim' must be passed in as '.'
func parseSource(str string, delim byte) (src, next string) {
if str == "" {
return "", ""
}
if str[0] == '$' {
return LabelSourceReserved, str[1:]
}
i := strings.IndexByte(str, delim)
if i < 0 {
if delim != '.' && strings.HasPrefix(str, LabelSourceReservedKeyPrefix) {
return LabelSourceReserved, strings.TrimPrefix(str, LabelSourceReservedKeyPrefix)
}
return "", str
}
return str[:i], str[i+1:]
}
// ParseLabel returns the label representation of the given string. The str should be
// in the form of Source:Key=Value or Source:Key if Value is empty. It also parses short
// forms, for example: $host will be Label{Key: "host", Source: "reserved", Value: ""}.
func ParseLabel(str string) Label {
return parseLabel(str, ':')
}
// parseLabel returns the label representation of the given string by value.
// For Cilium format 'delim' must be passed in as ':'
// For k8s format 'delim' must be passed in as '.'
func parseLabel(str string, delim byte) (lbl Label) {
src, next := parseSource(str, delim)
if src != "" {
lbl.Source = src
} else {
lbl.Source = LabelSourceUnspec
}
i := strings.IndexByte(next, '=')
if i < 0 {
lbl.Key = next
} else {
if i == 0 && src == LabelSourceReserved {
lbl.Key = next[i+1:]
} else {
lbl.Key = next[:i]
lbl.Value = next[i+1:]
}
}
if lbl.Source == LabelSourceCIDR {
if lbl.Value != "" {
logrus.WithField(logfields.Label, lbl.String()).Error("Invalid CIDR label: labels with source cidr cannot have values.")
}
c, err := LabelToPrefix(lbl.Key)
if err != nil {
logrus.WithField(logfields.Label, str).WithError(err).Error("Failed to parse CIDR label: invalid prefix.")
} else {
lbl.cidr = &c
}
}
return lbl
}
// ParseSelectLabel returns a selecting label representation of the given
// string. Unlike ParseLabel, if source is unspecified, the source defaults to
// LabelSourceAny
func ParseSelectLabel(str string) Label {
return parseSelectLabel(str, ':')
}
// parseSelectLabel returns a selecting label representation of the given
// string by value.
// For Cilium format 'delim' must be passed in as ':'
// For k8s format 'delim' must be passed in as '.'
func parseSelectLabel(str string, delim byte) Label {
lbl := parseLabel(str, delim)
if lbl.Source == LabelSourceUnspec {
lbl.Source = LabelSourceAny
}
return lbl
}
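// exampleParseSelectLabel is an illustrative sketch (not part of the original
// source): without an explicit source, a selecting label defaults to source
// "any". The label string is an assumption for demonstration only.
func exampleParseSelectLabel() Label {
    return ParseSelectLabel("app=frontend") // Source "any", Key "app", Value "frontend"
}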
// generateLabelString generates the string representation of a label with
// the provided source, key, and value in the format "source:key=value".
func generateLabelString(source, key, value string) string {
return source + ":" + key + "=" + value
}
// GenerateK8sLabelString generates the string representation of a label with
// the provided source, key, and value in the format "LabelSourceK8s:key=value".
func GenerateK8sLabelString(k, v string) string {
return generateLabelString(LabelSourceK8s, k, v)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package labels
import (
"fmt"
"github.com/sirupsen/logrus"
"github.com/cilium/cilium/pkg/logging/logfields"
)
type keepMarks map[string]struct{}
// set marks the label with 'key' to not be deleted.
func (k keepMarks) set(key string) {
k[key] = struct{}{} // marked for keeping
}
// OpLabels represents the possible categories of labels kept for an endpoint.
type OpLabels struct {
// Active labels that are enabled and disabled but not deleted
Custom Labels
// Labels derived from orchestration system
OrchestrationIdentity Labels
// orchestrationIdentity labels which have been disabled
Disabled Labels
// orchestrationInfo - labels from orchestration which are not used in determining a security identity
OrchestrationInfo Labels
}
// NewOpLabels creates new initialized OpLabels
func NewOpLabels() OpLabels {
return OpLabels{
Custom: Labels{},
Disabled: Labels{},
OrchestrationIdentity: Labels{},
OrchestrationInfo: Labels{},
}
}
// SplitUserLabelChanges returns labels to 'add' and 'del'ete to make
// the custom labels match 'lbls'
// FIXME: Somewhere in the code we crash if the returned maps are non-nil
// but length 0. We retain this behaviour here because it's easier.
func (o *OpLabels) SplitUserLabelChanges(lbls Labels) (add, del Labels) {
for key, lbl := range lbls {
if _, found := o.Custom[key]; !found {
if add == nil {
add = Labels{}
}
add[key] = lbl
}
}
for key, lbl := range o.Custom {
if _, found := lbls[key]; !found {
if del == nil {
del = Labels{}
}
del[key] = lbl
}
}
return add, del
}
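// exampleSplitUserLabelChanges is an illustrative sketch (not part of the
// original source): labels present in the argument but not in Custom are
// returned in 'add', and custom labels absent from the argument are returned
// in 'del'. The label strings are assumptions for demonstration only.
func exampleSplitUserLabelChanges() (add, del Labels) {
    o := NewOpLabels()
    o.Custom = NewLabelsFromModel([]string{"stale=1"})
    return o.SplitUserLabelChanges(NewLabelsFromModel([]string{"fresh=1"}))
}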
// IdentityLabels returns map of labels that are used when determining a
// security identity.
func (o *OpLabels) IdentityLabels() Labels {
enabled := make(Labels, len(o.Custom)+len(o.OrchestrationIdentity))
for k, v := range o.Custom {
enabled[k] = v
}
for k, v := range o.OrchestrationIdentity {
enabled[k] = v
}
return enabled
}
// GetIdentityLabel returns the value of the given Key from all IdentityLabels.
func (o *OpLabels) GetIdentityLabel(key string) (l Label, found bool) {
l, found = o.OrchestrationIdentity[key]
if !found {
l, found = o.Custom[key]
}
return l, found
}
// AllLabels returns all Labels within the provided OpLabels.
func (o *OpLabels) AllLabels() Labels {
all := make(Labels, len(o.Custom)+len(o.OrchestrationInfo)+len(o.OrchestrationIdentity)+len(o.Disabled))
for k, v := range o.Custom {
all[k] = v
}
for k, v := range o.Disabled {
all[k] = v
}
for k, v := range o.OrchestrationIdentity {
all[k] = v
}
for k, v := range o.OrchestrationInfo {
all[k] = v
}
return all
}
func (o *OpLabels) ReplaceInformationLabels(sourceFilter string, l Labels, logger *logrus.Entry) bool {
changed := false
keepers := make(keepMarks)
for _, v := range l {
keepers.set(v.Key)
if o.OrchestrationInfo.upsertLabel(sourceFilter, v) {
changed = true
logger.WithField(logfields.Object, logfields.Repr(v)).Debug("Assigning information label")
}
}
o.OrchestrationInfo.deleteUnMarked(sourceFilter, keepers)
return changed
}
func (o *OpLabels) ReplaceIdentityLabels(sourceFilter string, l Labels, logger *logrus.Entry) bool {
changed := false
keepers := make(keepMarks)
disabledKeepers := make(keepMarks)
for k, v := range l {
// A disabled identity label stays disabled without value updates
if _, found := o.Disabled[k]; found {
disabledKeepers.set(k)
} else if keepers.set(v.Key); o.OrchestrationIdentity.upsertLabel(sourceFilter, v) {
logger.WithField(logfields.Object, logfields.Repr(v)).Debug("Assigning security relevant label")
changed = true
}
}
if o.OrchestrationIdentity.deleteUnMarked(sourceFilter, keepers) || o.Disabled.deleteUnMarked(sourceFilter, disabledKeepers) {
changed = true
}
return changed
}
func (o *OpLabels) ModifyIdentityLabels(addLabels, delLabels Labels) (changed bool, err error) {
for k := range delLabels {
// The change request is accepted if the label is on
// any of the lists. If the label is already disabled,
// we will simply ignore that change.
if _, found := o.Custom[k]; !found {
if _, found := o.OrchestrationIdentity[k]; !found {
if _, found := o.Disabled[k]; !found {
return false, fmt.Errorf("label %s not found", k)
}
}
}
}
// Will not fail after this point
for k := range delLabels {
if v, found := o.OrchestrationIdentity[k]; found {
delete(o.OrchestrationIdentity, k)
o.Disabled[k] = v
changed = true
}
if _, found := o.Custom[k]; found {
delete(o.Custom, k)
changed = true
}
}
for k, v := range addLabels {
if _, found := o.Disabled[k]; found { // Restore label.
delete(o.Disabled, k)
o.OrchestrationIdentity[k] = v
changed = true
} else if _, found := o.OrchestrationIdentity[k]; found { // Replace label's source and value.
o.OrchestrationIdentity[k] = v
changed = true
} else {
o.Custom[k] = v
changed = true
}
}
return changed, nil
}
// upsertLabel updates or inserts 'label' in 'l', but only if exactly the same label
// was not already in 'l'. Returns 'true' if a label was added, or an old label was
// updated, 'false' otherwise.
// The label is only updated if its source matches the provided 'sourceFilter'
// or in case the provided sourceFilter is 'LabelSourceAny'. The new label must
// also match the old label 'source' in order for it to be replaced.
func (l Labels) upsertLabel(sourceFilter string, label Label) bool {
oldLabel, found := l[label.Key]
if found {
if sourceFilter != LabelSourceAny && sourceFilter != oldLabel.Source {
return false
}
// Key is the same, check if Value and Source are also the same
if label.Value == oldLabel.Value && label.Source == oldLabel.Source {
return false // No change
}
// If the label is not from the same source, then don't replace it.
if oldLabel.Source != label.Source {
return false
}
}
// Insert or replace old label
l[label.Key] = label
return true
}
// deleteUnMarked deletes the labels which have not been marked for keeping.
// The labels are only deleted if their source matches the provided sourceFilter
// or in case the provided sourceFilter is 'LabelSourceAny'.
// Returns true if any of them were deleted.
func (l Labels) deleteUnMarked(sourceFilter string, marks keepMarks) bool {
deleted := false
for k, v := range l {
if _, keep := marks[k]; !keep && (sourceFilter == LabelSourceAny || sourceFilter == v.Source) {
delete(l, k)
deleted = true
}
}
return deleted
}
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Code generated by deepequal-gen. DO NOT EDIT.
package labels
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Label) DeepEqual(other *Label) bool {
if other == nil {
return false
}
if in.Key != other.Key {
return false
}
if in.Value != other.Value {
return false
}
if in.Source != other.Source {
return false
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *LabelArray) DeepEqual(other *LabelArray) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *LabelArrayList) DeepEqual(other *LabelArrayList) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for i, inElement := range *in {
if !inElement.DeepEqual(&(*other)[i]) {
return false
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *Labels) DeepEqual(other *Labels) bool {
if other == nil {
return false
}
if len(*in) != len(*other) {
return false
} else {
for key, inValue := range *in {
if otherValue, present := (*other)[key]; !present {
return false
} else {
if !inValue.DeepEqual(&otherValue) {
return false
}
}
}
}
return true
}
// DeepEqual is an autogenerated deepequal function, deeply comparing the
// receiver with other. in must be non-nil.
func (in *OpLabels) DeepEqual(other *OpLabels) bool {
if other == nil {
return false
}
if ((in.Custom != nil) && (other.Custom != nil)) || ((in.Custom == nil) != (other.Custom == nil)) {
in, other := &in.Custom, &other.Custom
if other == nil || !in.DeepEqual(other) {
return false
}
}
if ((in.OrchestrationIdentity != nil) && (other.OrchestrationIdentity != nil)) || ((in.OrchestrationIdentity == nil) != (other.OrchestrationIdentity == nil)) {
in, other := &in.OrchestrationIdentity, &other.OrchestrationIdentity
if other == nil || !in.DeepEqual(other) {
return false
}
}
if ((in.Disabled != nil) && (other.Disabled != nil)) || ((in.Disabled == nil) != (other.Disabled == nil)) {
in, other := &in.Disabled, &other.Disabled
if other == nil || !in.DeepEqual(other) {
return false
}
}
if ((in.OrchestrationInfo != nil) && (other.OrchestrationInfo != nil)) || ((in.OrchestrationInfo == nil) != (other.OrchestrationInfo == nil)) {
in, other := &in.OrchestrationInfo, &other.OrchestrationInfo
if other == nil || !in.DeepEqual(other) {
return false
}
}
return true
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build !lockdebug
package lock
import (
"sync"
)
type internalRWMutex struct {
sync.RWMutex
}
func (i *internalRWMutex) UnlockIgnoreTime() {
i.RWMutex.Unlock()
}
type internalMutex struct {
sync.Mutex
}
func (i *internalMutex) UnlockIgnoreTime() {
i.Mutex.Unlock()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package lock
import "sync"
// Map is a thin generic wrapper around sync.Map. The sync.Map description from
// the standard library follows (and is also propagated to the corresponding
// methods) for users' convenience:
//
// Map is like a Go map[interface{}]interface{} but is safe for concurrent use
// by multiple goroutines without additional locking or coordination.
// Loads, stores, and deletes run in amortized constant time.
//
// The Map type is specialized. Most code should use a plain Go map instead,
// with separate locking or coordination, for better type safety and to make it
// easier to maintain other invariants along with the map content.
//
// The Map type is optimized for two common use cases: (1) when the entry for a given
// key is only ever written once but read many times, as in caches that only grow,
// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
// sets of keys. In these two cases, use of a Map may significantly reduce lock
// contention compared to a Go map paired with a separate Mutex or RWMutex.
//
// The zero Map is empty and ready for use. A Map must not be copied after first use.
type Map[K comparable, V any] sync.Map
// MapCmpValues is an extension of Map, which additionally wraps the two extra
// methods requiring values to be also of comparable type.
type MapCmpValues[K, V comparable] Map[K, V]
// Load returns the value stored in the map for a key, or the zero value if no
// value is present. The ok result indicates whether value was found in the map.
func (m *Map[K, V]) Load(key K) (value V, ok bool) {
val, ok := (*sync.Map)(m).Load(key)
return m.convert(val, ok)
}
// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
val, loaded := (*sync.Map)(m).LoadOrStore(key, value)
return val.(V), loaded
}
// LoadAndDelete deletes the value for a key, returning the previous value if any
// (zero value otherwise). The loaded result reports whether the key was present.
func (m *Map[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
val, loaded := (*sync.Map)(m).LoadAndDelete(key)
return m.convert(val, loaded)
}
// Store sets the value for a key.
func (m *Map[K, V]) Store(key K, value V) {
(*sync.Map)(m).Store(key, value)
}
// Swap swaps the value for a key and returns the previous value if any (zero
// value otherwise). The loaded result reports whether the key was present.
func (m *Map[K, V]) Swap(key K, value V) (previous V, loaded bool) {
val, loaded := (*sync.Map)(m).Swap(key, value)
return m.convert(val, loaded)
}
// Delete deletes the value for a key.
func (m *Map[K, V]) Delete(key K) {
(*sync.Map)(m).Delete(key)
}
// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently (including by f), Range may reflect any
// mapping for that key from any point during the Range call. Range does not
// block other methods on the receiver; even f itself may call any method on m.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *Map[K, V]) Range(f func(key K, value V) bool) {
(*sync.Map)(m).Range(func(key, value any) bool {
return f(key.(K), value.(V))
})
}
// CompareAndDelete deletes the entry for key if its value is equal to old.
// If there is no current value for key in the map, CompareAndDelete returns false
// (even if the old value is the nil interface value).
func (m *MapCmpValues[K, V]) CompareAndDelete(key K, old V) (deleted bool) {
return (*sync.Map)(m).CompareAndDelete(key, old)
}
// CompareAndSwap swaps the old and new values for key if the value stored in
// the map is equal to old.
func (m *MapCmpValues[K, V]) CompareAndSwap(key K, old, new V) bool {
return (*sync.Map)(m).CompareAndSwap(key, old, new)
}
func (m *Map[K, V]) convert(value any, ok bool) (V, bool) {
if !ok {
return *new(V), false
}
return value.(V), true
}
func (m *Map[K, V]) IsEmpty() bool {
empty := true
check := func(_ K, _ V) bool {
empty = false
return false // returning false breaks the iteration
}
m.Range(check)
return empty
}
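// exampleMapUsage is an illustrative sketch (not part of the original source)
// of the typed wrapper: keys and values keep their static types across Store
// and Load, and the zero Map is ready for use. The key and value types are
// assumptions for demonstration only.
func exampleMapUsage() (int, bool) {
    var m Map[string, int]
    m.Store("replicas", 3)
    return m.Load("replicas") // 3, true
}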
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package lock
import (
"context"
"golang.org/x/sync/semaphore"
)
// SemaphoredMutex is a semaphored mutex that provides a RWLocker interface.
type SemaphoredMutex struct {
semaphore *semaphore.Weighted
}
// using the same value set in `go/src/rwmutex.go#rwmutexMaxReaders`
const maxReaders = 1 << 30
// NewSemaphoredMutex returns a new SemaphoredMutex.
func NewSemaphoredMutex() SemaphoredMutex {
return SemaphoredMutex{
semaphore: semaphore.NewWeighted(maxReaders),
}
}
func (i *SemaphoredMutex) Lock() {
// It's fine ignoring error since the error is only caused by passing a
// context with a deadline.
i.semaphore.Acquire(context.Background(), maxReaders)
}
// UnlockToRLock releases the current lock for writing but it still keeps it
// for reading purposes.
func (i *SemaphoredMutex) UnlockToRLock() {
i.semaphore.Release(maxReaders - 1)
}
func (i *SemaphoredMutex) Unlock() {
i.semaphore.Release(maxReaders)
}
func (i *SemaphoredMutex) RLock() {
// It's fine ignoring error since the error is only caused by passing a
// context with a deadline.
i.semaphore.Acquire(context.Background(), 1)
}
func (i *SemaphoredMutex) RUnlock() {
i.semaphore.Release(1)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package lock
import (
"sort"
"sync"
"sync/atomic"
"time"
)
// sortableMutexSeq is a global sequence counter for the creation of new
// SortableMutex's with unique sequence numbers.
var sortableMutexSeq atomic.Uint64
// sortableMutex implements SortableMutex. Not exported as the only way to
// initialize it is via NewSortableMutex().
type sortableMutex struct {
sync.Mutex
seq uint64
acquireDuration time.Duration
}
func (s *sortableMutex) Lock() {
start := time.Now()
s.Mutex.Lock()
s.acquireDuration += time.Since(start)
}
func (s *sortableMutex) Seq() uint64 { return s.seq }
func (s *sortableMutex) AcquireDuration() time.Duration { return s.acquireDuration }
// SortableMutex provides a Mutex that can be globally sorted with other
// sortable mutexes. This allows deadlock-safe locking of a set of mutexes
// as it guarantees consistent lock ordering.
type SortableMutex interface {
sync.Locker
Seq() uint64
AcquireDuration() time.Duration // The amount of time it took to acquire the lock
}
// SortableMutexes is a set of mutexes that can be locked in a safe order.
// Once Lock() is called it must not be mutated!
type SortableMutexes []SortableMutex
// Len implements sort.Interface.
func (s SortableMutexes) Len() int {
return len(s)
}
// Less implements sort.Interface.
func (s SortableMutexes) Less(i int, j int) bool {
return s[i].Seq() < s[j].Seq()
}
// Swap implements sort.Interface.
func (s SortableMutexes) Swap(i int, j int) {
s[i], s[j] = s[j], s[i]
}
// Lock sorts the mutexes, and then locks them in order. If any lock cannot be acquired,
// this will block while holding the locks with a lower sequence number.
func (s SortableMutexes) Lock() {
sort.Sort(s)
for _, mu := range s {
mu.Lock()
}
}
// Unlock unlocks the sorted set of mutexes locked by a prior call to Lock().
func (s SortableMutexes) Unlock() {
for _, mu := range s {
mu.Unlock()
}
}
var _ sort.Interface = SortableMutexes{}
func NewSortableMutex() SortableMutex {
seq := sortableMutexSeq.Add(1)
return &sortableMutex{
seq: seq,
}
}
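// exampleSortableMutexes is an illustrative sketch (not part of the original
// source): no matter the construction order, Lock() sorts the set by sequence
// number before acquiring, which gives a consistent, deadlock-safe ordering.
func exampleSortableMutexes() {
    a, b := NewSortableMutex(), NewSortableMutex()
    set := SortableMutexes{b, a} // construction order does not matter
    set.Lock()
    defer set.Unlock()
}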
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package lock
import (
"sync"
"sync/atomic"
)
// A StoppableWaitGroup waits for a collection of goroutines to finish.
type StoppableWaitGroup struct {
noopDone chan struct{}
noopAdd chan struct{}
// i is the internal counter, which can tolerate negative values,
// unlike the standard library's sync.WaitGroup.
i atomic.Int64
doneOnce, stopOnce sync.Once
}
// NewStoppableWaitGroup returns a new StoppableWaitGroup. Once 'Stop' is
// executed, subsequent 'Add()' calls have no effect.
func NewStoppableWaitGroup() *StoppableWaitGroup {
return &StoppableWaitGroup{
noopDone: make(chan struct{}),
noopAdd: make(chan struct{}),
doneOnce: sync.Once{},
stopOnce: sync.Once{},
}
}
// Stop makes subsequent 'Add()' calls no-ops.
// If all goroutines that have called Add also called Done, 'Wait()' will
// be immediately unblocked.
func (l *StoppableWaitGroup) Stop() {
l.stopOnce.Do(func() {
// We will do an Add here so we can perform a Done after we close
// the l.noopAdd channel.
done := l.Add()
close(l.noopAdd)
// Calling done() here so we know that in case 'l.i' will become zero
// it will trigger a close of l.noopDone channel.
done()
})
}
// Wait will return once all goroutines that have called Add also called
// Done and StoppableWaitGroup was stopped.
// Internally, Wait() returns once the internal counter becomes negative.
func (l *StoppableWaitGroup) Wait() {
<-l.noopDone
}
// WaitChannel will return a channel that will be closed once all goroutines
// that have called Add also called Done and StoppableWaitGroup was stopped.
func (l *StoppableWaitGroup) WaitChannel() <-chan struct{} {
return l.noopDone
}
// DoneFunc returned by Add() marks the goroutine as completed.
type DoneFunc func()
// Add adds a goroutine to the set of routines that Wait() will wait for
// before it returns.
// If the StoppableWaitGroup was stopped this will be a no-op.
// Returns a "done" function to mark the goroutine as completed. Wait() is
// unblocked once all done functions obtained before Stop() have been called.
func (l *StoppableWaitGroup) Add() DoneFunc {
select {
case <-l.noopAdd:
return func() {}
default:
l.i.Add(1)
var once sync.Once
return func() {
once.Do(l.done)
}
}
}
// done will decrement the number of goroutines the Wait() will have to wait
// before it returns.
// This function is a no-op once all goroutines that have called 'Add()' have
// also called 'Done()' and the StoppableWaitGroup was stopped.
func (l *StoppableWaitGroup) done() {
select {
case <-l.noopDone:
return
default:
select {
case <-l.noopAdd:
a := l.i.Add(-1)
if a <= 0 {
l.doneOnce.Do(func() {
close(l.noopDone)
})
}
default:
a := l.i.Add(-1)
select {
// in case the channel was closed while we were in this default
// case, we need to check whether 'a' is zero or below and close
// the l.noopDone channel.
case <-l.noopAdd:
if a <= 0 {
l.doneOnce.Do(func() {
close(l.noopDone)
})
}
default:
}
}
}
}
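// exampleStoppableWaitGroup is an illustrative sketch (not part of the
// original source): Wait() returns only after Stop() has been called and
// every done function obtained before Stop() has run.
func exampleStoppableWaitGroup() {
    swg := NewStoppableWaitGroup()
    done := swg.Add()
    go func() {
        defer done() // mark this goroutine as completed
    }()
    swg.Stop() // Add() calls from now on are no-ops
    swg.Wait() // unblocks once done() above has been called
}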
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package logging
import (
"time"
"golang.org/x/time/rate"
)
// Limiter is a wrapper around rate.Limiter that does not panic when
// the limiter is uninitialized. The wrapping also allows more logging
// specific functionality to be added later without changing all the call
// sites.
type Limiter struct {
bucket *rate.Limiter
}
// NewLimiter returns a new Limiter allowing log messages to be
// emitted on average once every 'interval' and up to 'burst' messages
// during any 'interval'.
func NewLimiter(interval time.Duration, burst int) Limiter {
return Limiter{
bucket: rate.NewLimiter(rate.Every(interval), burst),
}
}
// Allow returns true if the log message is allowed under the
// configured rate limit.
func (ll Limiter) Allow() bool {
if ll.bucket == nil {
return true // limiter not initialized => no limit
}
return ll.bucket.Allow()
}
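// exampleLimiter is an illustrative sketch (not part of the original source):
// callers check Allow() before emitting a log line so that a noisy code path
// is throttled. The interval and burst values are assumptions for
// demonstration only.
func exampleLimiter() bool {
    limiter := NewLimiter(10*time.Second, 3)
    return limiter.Allow() // true for the initial burst, then rate limited
}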
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package logfields
import (
"fmt"
)
// Repr formats an object with the Printf %+v formatter
func Repr(s interface{}) string {
return fmt.Sprintf("%+v", s)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package logging
import (
"bufio"
"bytes"
"flag"
"fmt"
"io"
"os"
"regexp"
"strings"
"sync/atomic"
"time"
"github.com/sirupsen/logrus"
"k8s.io/klog/v2"
"github.com/cilium/cilium/pkg/logging/logfields"
)
type LogFormat string
const (
Syslog = "syslog"
LevelOpt = "level"
FormatOpt = "format"
LogFormatText LogFormat = "text"
LogFormatTextTimestamp LogFormat = "text-ts"
LogFormatJSON LogFormat = "json"
LogFormatJSONTimestamp LogFormat = "json-ts"
// DefaultLogFormat is the string representation of the default logrus.Formatter
// we want to use (possible values: text or json)
DefaultLogFormat LogFormat = LogFormatText
// DefaultLogFormatTimestamp is the string representation of the default logrus.Formatter
// including timestamps.
// We don't use this for general runtime logs since kubernetes log capture handles those.
// This is only used for applications such as CNI which is written to disk so we have no
// way to correlate with other logs.
DefaultLogFormatTimestamp LogFormat = LogFormatTextTimestamp
// DefaultLogLevel is the default log level we want to use for our logrus.Formatter
DefaultLogLevel logrus.Level = logrus.InfoLevel
)
// DefaultLogger is the base logrus logger. It is different from the logrus
// default to prevent external dependencies from writing to it unexpectedly.
var DefaultLogger = initializeDefaultLogger()
var klogErrorOverrides = []logLevelOverride{
{
// TODO: We can drop the misspelled case here once client-go version is bumped to include:
// https://github.com/kubernetes/client-go/commit/ae43527480ee9d8750fbcde3d403363873fd3d89
matcher: regexp.MustCompile("Failed to update lock (optimitically|optimistically).*falling back to slow path"),
targetLevel: logrus.InfoLevel,
},
}
func initializeKLog() error {
log := DefaultLogger.WithField(logfields.LogSubsys, "klog")
// Create a new flag set and set the error handler
klogFlags := flag.NewFlagSet("cilium", flag.ExitOnError)
// Make sure that klog logging variables are initialized so that we can
// update them from this file.
klog.InitFlags(klogFlags)
// Make sure klog does not log to stderr, since we want to control klog's
// output and have it log to the writer configured for each severity level.
klogFlags.Set("logtostderr", "false")
// We don't need all headers because logrus will already print them if
// necessary.
klogFlags.Set("skip_headers", "true")
errWriter, err := severityOverrideWriter(logrus.ErrorLevel, log, klogErrorOverrides)
if err != nil {
return fmt.Errorf("failed to setup klog error writer: %w", err)
}
klog.SetOutputBySeverity("INFO", log.WriterLevel(logrus.PanicLevel))
klog.SetOutputBySeverity("WARNING", log.WriterLevel(logrus.WarnLevel))
klog.SetOutputBySeverity("ERROR", errWriter)
klog.SetOutputBySeverity("FATAL", log.WriterLevel(logrus.FatalLevel))
// Do not repeat log messages on all severities in klog
klogFlags.Set("one_output", "true")
return nil
}
type logLevelOverride struct {
matcher *regexp.Regexp
targetLevel logrus.Level
}
func levelToPrintFunc(log *logrus.Entry, level logrus.Level) (func(args ...any), error) {
var printFunc func(args ...any)
switch level {
case logrus.InfoLevel:
printFunc = log.Info
case logrus.WarnLevel:
printFunc = log.Warn
case logrus.ErrorLevel:
printFunc = log.Error
default:
return nil, fmt.Errorf("unsupported log level %q", level)
}
return printFunc, nil
}
func severityOverrideWriter(level logrus.Level, log *logrus.Entry, overrides []logLevelOverride) (*io.PipeWriter, error) {
printFunc, err := levelToPrintFunc(log, level)
if err != nil {
return nil, err
}
reader, writer := io.Pipe()
for _, override := range overrides {
_, err := levelToPrintFunc(log, override.targetLevel)
if err != nil {
return nil, fmt.Errorf("failed to validate klog matcher level overrides (%s -> %s): %w",
override.matcher.String(), override.targetLevel, err)
}
}
go writerScanner(log, reader, printFunc, overrides)
return writer, nil
}
// writerScanner scans the input from the reader and writes it to the appropriate
// log print func.
// In cases where the log message is overridden, that will be emitted via the specified
// target log level logger function.
//
// Based on code from logrus WriterLevel implementation [1]
//
// [1] https://github.com/sirupsen/logrus/blob/v1.9.3/writer.go#L66-L97
func writerScanner(
entry *logrus.Entry,
reader *io.PipeReader,
defaultPrintFunc func(args ...interface{}),
overrides []logLevelOverride) {
defer reader.Close()
scanner := bufio.NewScanner(reader)
// Set the buffer size to the maximum token size to avoid buffer overflows
scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize)
// Define a split function to split the input into chunks of up to 64KB
chunkSize := bufio.MaxScanTokenSize // 64KB
splitFunc := func(data []byte, atEOF bool) (int, []byte, error) {
if len(data) >= chunkSize {
return chunkSize, data[:chunkSize], nil
}
return bufio.ScanLines(data, atEOF)
}
// Use the custom split function to split the input
scanner.Split(splitFunc)
// Scan the input and write it to the logger using the specified print function
for scanner.Scan() {
line := scanner.Text()
matched := false
for _, override := range overrides {
printFn, err := levelToPrintFunc(entry, override.targetLevel)
if err != nil {
entry.WithError(err).WithField("matcher", override.matcher).
Error("BUG: failed to get printer for klog override matcher")
continue
}
if override.matcher.FindString(line) != "" {
printFn(strings.TrimRight(line, "\r\n"))
matched = true
break
}
}
if !matched {
defaultPrintFunc(strings.TrimRight(line, "\r\n"))
}
}
if err := scanner.Err(); err != nil {
entry.WithError(err).Error("klog logrus override scanner stopped scanning with an error. " +
"This may mean that k8s client-go logs will no longer be emitted")
}
}
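// Illustrative sketch (not part of the upstream code): a hypothetical extra
// override using the same matcher/targetLevel shape consumed by
// severityOverrideWriter above. The regular expression is made up purely for
// demonstration; only klogErrorOverrides is actually wired up.
func exampleNoisyKlogOverride() logLevelOverride {
return logLevelOverride{
// Hypothetical pattern, for illustration only.
matcher: regexp.MustCompile("watch of .* ended with: very short watch"),
targetLevel: logrus.InfoLevel,
}
}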
// LogOptions maps configuration key-value pairs related to logging.
type LogOptions map[string]string
// initializeDefaultLogger returns a logrus Logger with the default logging
// settings.
func initializeDefaultLogger() (logger *logrus.Logger) {
logger = logrus.New()
logger.SetFormatter(GetFormatter(DefaultLogFormatTimestamp))
logger.SetLevel(DefaultLogLevel)
return
}
// GetLogLevel returns the log level specified in the provided LogOptions. If
// it is not set in the options, it will return the default level.
func (o LogOptions) GetLogLevel() (level logrus.Level) {
levelOpt, ok := o[LevelOpt]
if !ok {
return DefaultLogLevel
}
var err error
if level, err = logrus.ParseLevel(levelOpt); err != nil {
logrus.WithError(err).Warning("Ignoring user-configured log level")
return DefaultLogLevel
}
return
}
// GetLogFormat returns the log format specified in the provided LogOptions. If
// it is not set in the options or is invalid, it will return the default format.
func (o LogOptions) GetLogFormat() LogFormat {
formatOpt, ok := o[FormatOpt]
if !ok {
return DefaultLogFormatTimestamp
}
formatOpt = strings.ToLower(formatOpt)
re := regexp.MustCompile(`^(text|text-ts|json|json-ts)$`)
if !re.MatchString(formatOpt) {
logrus.WithError(
fmt.Errorf("incorrect log format configured '%s', expected 'text', 'text-ts', 'json' or 'json-ts'", formatOpt),
).Warning("Ignoring user-configured log format")
return DefaultLogFormatTimestamp
}
return LogFormat(formatOpt)
}
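// Illustrative sketch: constructing a LogOptions map by hand and reading back
// the parsed level and format. The values shown ("debug", "json-ts") are
// example inputs, not defaults taken from elsewhere.
func exampleLogOptions() (logrus.Level, LogFormat) {
opts := LogOptions{
LevelOpt: "debug",
FormatOpt: "json-ts",
}
return opts.GetLogLevel(), opts.GetLogFormat()
}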
// SetLogLevel updates the DefaultLogger with a new logrus.Level
func SetLogLevel(logLevel logrus.Level) {
DefaultLogger.SetLevel(logLevel)
}
// SetDefaultLogLevel updates the DefaultLogger with the DefaultLogLevel
func SetDefaultLogLevel() {
DefaultLogger.SetLevel(DefaultLogLevel)
}
// SetLogLevelToDebug updates the DefaultLogger with the logrus.DebugLevel
func SetLogLevelToDebug() {
DefaultLogger.SetLevel(logrus.DebugLevel)
}
// SetLogFormat updates the DefaultLogger with a new LogFormat
func SetLogFormat(logFormat LogFormat) {
DefaultLogger.SetFormatter(GetFormatter(logFormat))
}
// SetDefaultLogFormat updates the DefaultLogger with the DefaultLogFormat
func SetDefaultLogFormat() {
DefaultLogger.SetFormatter(GetFormatter(DefaultLogFormatTimestamp))
}
// AddHooks adds additional logrus hook to default logger
func AddHooks(hooks ...logrus.Hook) {
for _, hook := range hooks {
DefaultLogger.AddHook(hook)
}
}
// SetupLogging sets up each logging service provided in loggers and configures
// each logger with the provided logOpts.
func SetupLogging(loggers []string, logOpts LogOptions, tag string, debug bool) error {
// Bridge klog to logrus. Note that this will open multiple pipes and fork
// background goroutines that are not cleaned up.
if err := initializeKLog(); err != nil {
return fmt.Errorf("failed to initialize klog bridge: %w", err)
}
if debug {
logOpts[LevelOpt] = "debug"
}
initializeSlog(logOpts, len(loggers) == 0)
// Updating the default log format
SetLogFormat(logOpts.GetLogFormat())
// Set default logger to output to stdout if no loggers are provided.
if len(loggers) == 0 {
// TODO: switch to a per-logger version when we upgrade to logrus >1.0.3
logrus.SetOutput(os.Stdout)
}
// Updating the default log level, overriding the log options if the debug arg is being set
if debug {
SetLogLevelToDebug()
} else {
SetLogLevel(logOpts.GetLogLevel())
}
// always suppress the default logger so libraries don't print things
logrus.SetLevel(logrus.PanicLevel)
// Iterate through all provided loggers and configure them according
// to user-provided settings.
for _, logger := range loggers {
switch logger {
case Syslog:
if err := setupSyslog(logOpts, tag, debug); err != nil {
return fmt.Errorf("failed to set up syslog: %w", err)
}
default:
return fmt.Errorf("provided log driver %q is not a supported log driver", logger)
}
}
return nil
}
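// Illustrative sketch of wiring up logging at startup: no extra log drivers,
// debug disabled, and the format forced to JSON. The "cilium-agent" tag and
// the option values are example inputs only.
func exampleSetupLogging() error {
return SetupLogging(nil, LogOptions{FormatOpt: "json"}, "cilium-agent", false)
}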
// GetFormatter returns a configured logrus.Formatter with some specific values
// we want to have
func GetFormatter(format LogFormat) logrus.Formatter {
switch format {
case LogFormatText:
return &logrus.TextFormatter{
DisableTimestamp: true,
DisableColors: true,
}
case LogFormatTextTimestamp:
return &logrus.TextFormatter{
DisableTimestamp: false,
TimestampFormat: time.RFC3339Nano,
DisableColors: true,
}
case LogFormatJSON:
return &logrus.JSONFormatter{
DisableTimestamp: true,
}
case LogFormatJSONTimestamp:
return &logrus.JSONFormatter{
DisableTimestamp: false,
TimestampFormat: time.RFC3339Nano,
}
}
return nil
}
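// Illustrative sketch: reusing GetFormatter to configure a standalone logrus
// logger with the same timestamped JSON output as the package formatters.
// This is an example helper, not something the package itself calls.
func exampleStandaloneLogger() *logrus.Logger {
l := logrus.New()
l.SetFormatter(GetFormatter(LogFormatJSONTimestamp))
l.SetLevel(logrus.InfoLevel)
return l
}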
// validateOpts iterates through all of the keys and values in logOpts, and errors out if
// the key in logOpts is not a key in supportedOpts, or the value of the corresponding key
// is not listed in validKVs.
func (o LogOptions) validateOpts(logDriver string, supportedOpts map[string]bool, validKVs map[string][]string) error {
for k, v := range o {
if !supportedOpts[k] {
return fmt.Errorf("provided configuration key %q is not supported as a logging option for log driver %s", k, logDriver)
}
if validValues, ok := validKVs[k]; ok {
valid := false
for _, vv := range validValues {
if v == vv {
valid = true
break
}
}
if !valid {
return fmt.Errorf("provided configuration value %q is not a valid value for %q in log driver %s, valid values: %v", v, k, logDriver, validValues)
}
}
}
return nil
}
// getLogDriverConfig returns a map containing the key-value pairs that start
// with string logDriver from map logOpts.
func getLogDriverConfig(logDriver string, logOpts LogOptions) LogOptions {
keysToValidate := make(LogOptions)
for k, v := range logOpts {
ok, err := regexp.MatchString(logDriver+".*", k)
if err != nil {
DefaultLogger.Fatal(err)
}
if ok {
keysToValidate[k] = v
}
}
return keysToValidate
}
// MultiLine breaks a multi line text into individual log entries and calls the
// logging function to log each entry
func MultiLine(logFn func(args ...interface{}), output string) {
scanner := bufio.NewScanner(bytes.NewReader([]byte(output)))
for scanner.Scan() {
logFn(scanner.Text())
}
}
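// Illustrative sketch: feeding captured multi-line command output through
// MultiLine so each line becomes its own debug entry. Example helper only.
func exampleMultiLine(output string) {
MultiLine(DefaultLogger.Debug, output)
}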
// CanLogAt returns whether a log message at the given level would be
// logged by the given logger.
func CanLogAt(logger *logrus.Logger, level logrus.Level) bool {
return GetLevel(logger) >= level
}
// GetLevel returns the log level of the given logger.
func GetLevel(logger *logrus.Logger) logrus.Level {
return logrus.Level(atomic.LoadUint32((*uint32)(&logger.Level)))
}
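// Illustrative sketch: guarding an expensive dump behind CanLogAt so the
// formatting work is skipped entirely when debug logging is disabled.
// The expensiveDump callback is a hypothetical parameter.
func exampleGuardedDebug(expensiveDump func() string) {
if CanLogAt(DefaultLogger, logrus.DebugLevel) {
DefaultLogger.Debug(expensiveDump())
}
}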
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build !windows
package logging
import (
"log/syslog"
"github.com/sirupsen/logrus"
logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
)
const (
SLevel = "syslog.level"
SNetwork = "syslog.network"
SAddress = "syslog.address"
SSeverity = "syslog.severity"
SFacility = "syslog.facility"
STag = "syslog.tag"
)
var (
// syslogOpts is the set of supported options for syslog configuration.
syslogOpts = map[string]bool{
SLevel: true,
SNetwork: true,
SAddress: true,
SSeverity: true,
SFacility: true,
STag: true,
}
// From /usr/include/sys/syslog.h.
syslogSeverityMap = map[string]syslog.Priority{
"emerg": syslog.LOG_EMERG,
"panic": syslog.LOG_EMERG,
"alert": syslog.LOG_ALERT,
"crit": syslog.LOG_CRIT,
"err": syslog.LOG_ERR,
"error": syslog.LOG_ERR,
"warn": syslog.LOG_WARNING,
"warning": syslog.LOG_WARNING,
"notice": syslog.LOG_NOTICE,
"info": syslog.LOG_INFO,
"debug": syslog.LOG_DEBUG,
}
// From /usr/include/sys/syslog.h.
syslogFacilityMap = map[string]syslog.Priority{
"kern": syslog.LOG_KERN,
"user": syslog.LOG_USER,
"mail": syslog.LOG_MAIL,
"daemon": syslog.LOG_DAEMON,
"auth": syslog.LOG_AUTH,
"syslog": syslog.LOG_SYSLOG,
"lpr": syslog.LOG_LPR,
"news": syslog.LOG_NEWS,
"uucp": syslog.LOG_UUCP,
"cron": syslog.LOG_CRON,
"authpriv": syslog.LOG_AUTHPRIV,
"ftp": syslog.LOG_FTP,
"local0": syslog.LOG_LOCAL0,
"local1": syslog.LOG_LOCAL1,
"local2": syslog.LOG_LOCAL2,
"local3": syslog.LOG_LOCAL3,
"local4": syslog.LOG_LOCAL4,
"local5": syslog.LOG_LOCAL5,
"local6": syslog.LOG_LOCAL6,
"local7": syslog.LOG_LOCAL7,
}
// syslogLevelMap maps logrus.Level values to syslog.Priority levels.
syslogLevelMap = map[logrus.Level]syslog.Priority{
logrus.PanicLevel: syslog.LOG_ALERT,
logrus.FatalLevel: syslog.LOG_CRIT,
logrus.ErrorLevel: syslog.LOG_ERR,
logrus.WarnLevel: syslog.LOG_WARNING,
logrus.InfoLevel: syslog.LOG_INFO,
logrus.DebugLevel: syslog.LOG_DEBUG,
logrus.TraceLevel: syslog.LOG_DEBUG,
}
)
func mapStringPriorityToSlice(m map[string]syslog.Priority) []string {
s := make([]string, 0, len(m))
for k := range m {
s = append(s, k)
}
return s
}
// setupSyslog sets up and configures syslog with the provided options in
// logOpts. If some options are not provided, sensible defaults are used.
func setupSyslog(logOpts LogOptions, tag string, debug bool) error {
opts := getLogDriverConfig(Syslog, logOpts)
syslogOptValues := make(map[string][]string)
syslogOptValues[SSeverity] = mapStringPriorityToSlice(syslogSeverityMap)
syslogOptValues[SFacility] = mapStringPriorityToSlice(syslogFacilityMap)
if err := opts.validateOpts(Syslog, syslogOpts, syslogOptValues); err != nil {
return err
}
if stag, ok := opts[STag]; ok {
tag = stag
}
logLevel, ok := opts[SLevel]
if !ok {
if debug {
logLevel = "debug"
} else {
logLevel = "info"
}
}
// Validate provided log level.
level, err := logrus.ParseLevel(logLevel)
if err != nil {
DefaultLogger.Fatal(err)
}
SetLogLevel(level)
network := ""
address := ""
// Inherit severity from log level if syslog.severity is not specified explicitly
severity := syslogLevelMap[level]
// Default values for facility if not specified
facility := syslog.LOG_KERN
if networkStr, ok := opts[SNetwork]; ok {
network = networkStr
}
if addressStr, ok := opts[SAddress]; ok {
address = addressStr
}
if severityStr, ok := opts[SSeverity]; ok {
severity = syslogSeverityMap[severityStr]
}
if facilityStr, ok := opts[SFacility]; ok {
facility = syslogFacilityMap[facilityStr]
}
// Create syslog hook.
h, err := logrus_syslog.NewSyslogHook(network, address, severity|facility, tag)
if err != nil {
DefaultLogger.Fatal(err)
}
// TODO: switch to a per-logger version when we upgrade to logrus >1.0.3
logrus.AddHook(h)
DefaultLogger.AddHook(h)
return nil
}
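// Illustrative sketch: the kind of option map setupSyslog consumes when the
// syslog driver is selected. All values are example inputs; severity and
// facility are validated against syslogSeverityMap and syslogFacilityMap above.
func exampleSyslogOpts() LogOptions {
return LogOptions{
SLevel: "info",
SNetwork: "udp",
SAddress: "127.0.0.1:514",
SSeverity: "info",
SFacility: "local5",
STag: "cilium-agent",
}
}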
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package logging
import (
"context"
"log/slog"
"os"
"strings"
"time"
"github.com/sirupsen/logrus"
"github.com/cilium/cilium/pkg/logging/logfields"
)
// logrErrorKey is the key used by the logr library for the error parameter.
const logrErrorKey = "err"
// SlogNopHandler discards all logs.
var SlogNopHandler slog.Handler = nopHandler{}
type nopHandler struct{}
func (nopHandler) Enabled(context.Context, slog.Level) bool { return false }
func (nopHandler) Handle(context.Context, slog.Record) error { return nil }
func (n nopHandler) WithAttrs([]slog.Attr) slog.Handler { return n }
func (n nopHandler) WithGroup(string) slog.Handler { return n }
var slogHandlerOpts = &slog.HandlerOptions{
AddSource: false,
Level: slog.LevelInfo,
ReplaceAttr: replaceAttrFnWithoutTimestamp,
}
// Default slog logger. Will be overwritten once initializeSlog is called.
var DefaultSlogLogger *slog.Logger = slog.New(slog.NewTextHandler(
os.Stderr,
slogHandlerOpts,
))
func slogLevel(l logrus.Level) slog.Level {
switch l {
case logrus.DebugLevel, logrus.TraceLevel:
return slog.LevelDebug
case logrus.InfoLevel:
return slog.LevelInfo
case logrus.WarnLevel:
return slog.LevelWarn
case logrus.ErrorLevel, logrus.PanicLevel, logrus.FatalLevel:
return slog.LevelError
default:
return slog.LevelInfo
}
}
// Approximates the logrus output via slog for job groups during the transition
// phase.
func initializeSlog(logOpts LogOptions, useStdout bool) {
opts := *slogHandlerOpts
opts.Level = slogLevel(logOpts.GetLogLevel())
logFormat := logOpts.GetLogFormat()
switch logFormat {
case LogFormatJSON, LogFormatText:
opts.ReplaceAttr = replaceAttrFnWithoutTimestamp
case LogFormatJSONTimestamp, LogFormatTextTimestamp:
opts.ReplaceAttr = replaceAttrFn
}
writer := os.Stderr
if useStdout {
writer = os.Stdout
}
switch logFormat {
case LogFormatJSON, LogFormatJSONTimestamp:
DefaultSlogLogger = slog.New(slog.NewJSONHandler(
writer,
&opts,
))
case LogFormatText, LogFormatTextTimestamp:
DefaultSlogLogger = slog.New(slog.NewTextHandler(
writer,
&opts,
))
}
}
func replaceAttrFn(groups []string, a slog.Attr) slog.Attr {
switch a.Key {
case slog.TimeKey:
// Adjust to timestamp format that logrus uses; except that we can't
// force slog to quote the value like logrus does...
return slog.String(slog.TimeKey, a.Value.Time().Format(time.RFC3339))
case slog.LevelKey:
// Lower-case the log level
return slog.Attr{
Key: a.Key,
Value: slog.StringValue(strings.ToLower(a.Value.String())),
}
case logrErrorKey:
// Uniform the attribute identifying the error
return slog.Attr{
Key: logfields.Error,
Value: a.Value,
}
}
return a
}
func replaceAttrFnWithoutTimestamp(groups []string, a slog.Attr) slog.Attr {
switch a.Key {
case slog.TimeKey:
// Drop timestamps
return slog.Attr{}
default:
return replaceAttrFn(groups, a)
}
}
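// Illustrative sketch: building a standalone slog text handler that reuses the
// package's attribute rewriting (lower-cased level key, timestamps dropped).
// Example helper only; the package itself constructs handlers in initializeSlog.
func exampleSlogTextLogger() *slog.Logger {
return slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
Level: slog.LevelDebug,
ReplaceAttr: replaceAttrFnWithoutTimestamp,
}))
}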
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package mac
import (
"bytes"
"crypto/rand"
"encoding/hex"
"fmt"
"net"
)
// EthHdrLen is the length of an untagged Ethernet (IEEE 802.3) frame header in bytes
const EthHdrLen = 14
// Uint64MAC is the __u64 representation of a MAC address.
// It corresponds to the C mac_t type used in bpf/.
type Uint64MAC uint64
func (m Uint64MAC) String() string {
return fmt.Sprintf("%02X:%02X:%02X:%02X:%02X:%02X",
uint64((m & 0x0000000000FF)),
uint64((m&0x00000000FF00)>>8),
uint64((m&0x000000FF0000)>>16),
uint64((m&0x0000FF000000)>>24),
uint64((m&0x00FF00000000)>>32),
uint64((m&0xFF0000000000)>>40),
)
}
// MAC is a net.HardwareAddr encapsulation to force cilium to only use MAC-48 addresses.
type MAC net.HardwareAddr
// String returns the string representation of m.
func (m MAC) String() string {
return net.HardwareAddr(m).String()
}
// ParseMAC parses s only as an IEEE 802 MAC-48.
func ParseMAC(s string) (MAC, error) {
ha, err := net.ParseMAC(s)
if err != nil {
return nil, err
}
if len(ha) != 6 {
return nil, fmt.Errorf("invalid MAC address %s", s)
}
return MAC(ha), nil
}
// Uint64 returns the MAC in uint64 format. The MAC is represented as little-endian in
// the returned value.
// Example:
//
// m := MAC([]byte{0x11, 0x12, 0x23, 0x34, 0x45, 0x56})
// v, err := m.Uint64()
// fmt.Printf("0x%X", v) // 0x564534231211
func (m MAC) Uint64() (Uint64MAC, error) {
if len(m) != 6 {
return 0, fmt.Errorf("invalid MAC address %s", m.String())
}
res := uint64(m[5])<<40 | uint64(m[4])<<32 | uint64(m[3])<<24 |
uint64(m[2])<<16 | uint64(m[1])<<8 | uint64(m[0])
return Uint64MAC(res), nil
}
func (m MAC) MarshalJSON() ([]byte, error) {
if len(m) == 0 {
return []byte(`""`), nil
}
if len(m) != 6 {
return nil, fmt.Errorf("invalid MAC address length %s", string(m))
}
return []byte(fmt.Sprintf("\"%02x:%02x:%02x:%02x:%02x:%02x\"", m[0], m[1], m[2], m[3], m[4], m[5])), nil
}
func (m MAC) MarshalIndentJSON(prefix, indent string) ([]byte, error) {
return m.MarshalJSON()
}
func (m *MAC) UnmarshalJSON(data []byte) error {
if len(data) == len([]byte(`""`)) {
if m == nil {
m = new(MAC)
}
*m = MAC{}
return nil
}
if len(data) != 19 {
return fmt.Errorf("invalid MAC address length %s", string(data))
}
data = data[1 : len(data)-1]
macStr := bytes.Replace(data, []byte(`:`), []byte(``), -1)
if len(macStr) != 12 {
return fmt.Errorf("invalid MAC address format")
}
macByte := make([]byte, hex.DecodedLen(len(macStr)))
if _, err := hex.Decode(macByte, macStr); err != nil {
return fmt.Errorf("invalid MAC address: %w", err)
}
*m = MAC{macByte[0], macByte[1], macByte[2], macByte[3], macByte[4], macByte[5]}
return nil
}
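// Illustrative sketch: parsing a MAC, converting it to its little-endian
// uint64 form, and rendering the JSON representation. The address literal is
// an arbitrary example value.
func exampleMACConversions() error {
m, err := ParseMAC("11:12:23:34:45:56")
if err != nil {
return err
}
v, err := m.Uint64()
if err != nil {
return err
}
j, err := m.MarshalJSON()
if err != nil {
return err
}
fmt.Printf("mac=%s uint64=%s json=%s\n", m, v, j)
return nil
}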
// GenerateRandMAC generates a random unicast and locally administered MAC address.
func GenerateRandMAC() (MAC, error) {
buf := make([]byte, 6)
if _, err := rand.Read(buf); err != nil {
return nil, fmt.Errorf("unable to retrieve 6 random bytes: %w", err)
}
// Set locally administered addresses bit and reset multicast bit
buf[0] = (buf[0] | 0x02) & 0xfe
return buf, nil
}
// HaveMACAddrs returns true if all given network interfaces have L2 addr.
func HaveMACAddrs(ifaces []string) bool {
for _, iface := range ifaces {
if !HasMacAddr(iface) {
return false
}
}
return true
}
// CArrayString returns a string which can be used for assigning the given
// MAC addr to "union macaddr" in C.
func CArrayString(m net.HardwareAddr) string {
if m == nil || len(m) != 6 {
return "{0x0,0x0,0x0,0x0,0x0,0x0}"
}
return fmt.Sprintf("{0x%x,0x%x,0x%x,0x%x,0x%x,0x%x}",
m[0], m[1], m[2], m[3], m[4], m[5])
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package mac
import (
"errors"
"net"
"github.com/vishvananda/netlink"
"github.com/cilium/cilium/pkg/datapath/linux/safenetlink"
)
// HasMacAddr returns true if the given network interface has L2 addr.
func HasMacAddr(iface string) bool {
link, err := safenetlink.LinkByName(iface)
if err != nil {
return false
}
return LinkHasMacAddr(link)
}
// LinkHasMacAddr returns true if the given network interface has L2 addr.
func LinkHasMacAddr(link netlink.Link) bool {
return len(link.Attrs().HardwareAddr) != 0
}
// ReplaceMacAddressWithLinkName replaces the MAC address of the given link
func ReplaceMacAddressWithLinkName(ifName, macAddress string) error {
l, err := safenetlink.LinkByName(ifName)
if err != nil {
if errors.As(err, &netlink.LinkNotFoundError{}) {
return nil
}
return err
}
hw, err := net.ParseMAC(macAddress)
if err != nil {
return err
}
return netlink.LinkSetHardwareAddr(l, hw)
}
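// Illustrative sketch: only rewrite the MAC when the interface currently has
// an L2 address. The interface name and address are example values.
func exampleEnsureMac() error {
const iface = "eth0"
if !HasMacAddr(iface) {
return nil
}
return ReplaceMacAddressWithLinkName(iface, "0a:58:0a:00:00:01")
}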
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package format
import (
"strconv"
"strings"
"github.com/spf13/pflag"
)
// Uint16Flags is a slice of unsigned 16-bit ints with some convenience methods.
type Uint16Flags []uint16
var _ pflag.Value = &Uint16Flags{}
// String provides a human-readable string format of the received variable.
func (i *Uint16Flags) String() string {
pieces := make([]string, 0, len(*i))
for _, v := range *i {
pieces = append(pieces, strconv.Itoa(int(v)))
}
return strings.Join(pieces, ", ")
}
// Set converts the specified value into an integer and appends it to the flags.
// Returns an error if the value cannot be converted to a 16-bit unsigned value.
func (i *Uint16Flags) Set(value string) error {
vUint64, err := strconv.ParseUint(value, 10, 16)
if err != nil {
return err
}
*i = append(*i, uint16(vUint64))
return nil
}
// Type returns a human-readable string representing the type of the receiver.
func (i *Uint16Flags) Type() string {
return "[]uint16"
}
// Has returns true if the given value exists in the flags
func (i *Uint16Flags) Has(value uint16) bool {
for _, v := range *i {
if v == value {
return true
}
}
return false
}
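// Illustrative sketch: registering a Uint16Flags value on a pflag FlagSet and
// checking membership after parsing. The flag name and values are examples.
func exampleUint16Flags() (bool, error) {
var ports Uint16Flags
fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
fs.Var(&ports, "from", "filter by source endpoint id")
if err := fs.Parse([]string{"--from=42", "--from=80"}); err != nil {
return false, err
}
return ports.Has(42), nil
}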
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package format
import (
"bytes"
"encoding/binary"
"encoding/gob"
"fmt"
"github.com/cilium/cilium/pkg/byteorder"
"github.com/cilium/cilium/pkg/hubble/parser/getters"
"github.com/cilium/cilium/pkg/monitor"
monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
"github.com/cilium/cilium/pkg/monitor/payload"
)
// Verbosity levels for formatting output.
type Verbosity uint8
const (
msgSeparator = "------------------------------------------------------------------------------"
// INFO is the level of verbosity in which summaries of Drop and Capture
// messages are printed out when the monitor is invoked
INFO Verbosity = iota + 1
// DEBUG is the level of verbosity in which more information about packets
// is printed than in INFO mode. Debug, Drop, and Capture messages are printed.
DEBUG
// VERBOSE is the level of verbosity in which the most information possible
// about packets is printed out. It is currently not utilized.
VERBOSE
// JSON is the level of verbosity in which event information is printed out in json format
JSON
)
// MonitorFormatter filters and formats monitor messages from a buffer.
type MonitorFormatter struct {
EventTypes monitorAPI.MessageTypeFilter
FromSource Uint16Flags
ToDst Uint16Flags
Related Uint16Flags
Hex bool
JSONOutput bool
Verbosity Verbosity
Numeric bool
linkMonitor getters.LinkGetter
}
// NewMonitorFormatter returns a new formatter with default configuration.
func NewMonitorFormatter(verbosity Verbosity, linkMonitor getters.LinkGetter) *MonitorFormatter {
return &MonitorFormatter{
Hex: false,
EventTypes: monitorAPI.MessageTypeFilter{},
FromSource: Uint16Flags{},
ToDst: Uint16Flags{},
Related: Uint16Flags{},
JSONOutput: false,
Verbosity: verbosity,
Numeric: bool(monitor.DisplayLabel),
linkMonitor: linkMonitor,
}
}
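// Illustrative sketch: a formatter that only prints events originating from,
// or related to, endpoint 42 and dumps payloads as hex. The endpoint ID is an
// example value; linkMonitor is passed through unchanged.
func exampleFilteredFormatter(linkMonitor getters.LinkGetter) *MonitorFormatter {
mf := NewMonitorFormatter(INFO, linkMonitor)
mf.FromSource = Uint16Flags{42}
mf.Related = Uint16Flags{42}
mf.Hex = true
return mf
}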
// match checks whether the event type, source endpoint, and/or destination
// endpoint match the configured filters, when such filters are supplied. The
// Related filter matches if either the source or the destination endpoint
// matches. Filters that are left empty are ignored.
func (m *MonitorFormatter) match(messageType int, src uint16, dst uint16) bool {
if len(m.EventTypes) > 0 && !m.EventTypes.Contains(messageType) {
return false
} else if len(m.FromSource) > 0 && !m.FromSource.Has(src) {
return false
} else if len(m.ToDst) > 0 && !m.ToDst.Has(dst) {
return false
} else if len(m.Related) > 0 && !m.Related.Has(src) && !m.Related.Has(dst) {
return false
}
return true
}
// dropEvents prints out all the received drop notifications.
func (m *MonitorFormatter) dropEvents(prefix string, data []byte) {
dn := monitor.DropNotify{}
if err := binary.Read(bytes.NewReader(data), byteorder.Native, &dn); err != nil {
fmt.Printf("Error while parsing drop notification message: %s\n", err)
}
if m.match(monitorAPI.MessageTypeDrop, dn.Source, uint16(dn.DstID)) {
switch m.Verbosity {
case INFO, DEBUG:
dn.DumpInfo(data, monitor.DisplayFormat(m.Numeric))
case JSON:
dn.DumpJSON(data, prefix)
default:
fmt.Println(msgSeparator)
dn.DumpVerbose(!m.Hex, data, prefix, monitor.DisplayFormat(m.Numeric))
}
}
}
// traceEvents prints out all the received trace notifications.
func (m *MonitorFormatter) traceEvents(prefix string, data []byte) {
tn := monitor.TraceNotify{}
if err := monitor.DecodeTraceNotify(data, &tn); err != nil {
fmt.Printf("Error while parsing trace notification message: %s\n", err)
}
if m.match(monitorAPI.MessageTypeTrace, tn.Source, tn.DstID) {
switch m.Verbosity {
case INFO, DEBUG:
tn.DumpInfo(data, monitor.DisplayFormat(m.Numeric), m.linkMonitor)
case JSON:
tn.DumpJSON(data, prefix, m.linkMonitor)
default:
fmt.Println(msgSeparator)
tn.DumpVerbose(!m.Hex, data, prefix, monitor.DisplayFormat(m.Numeric), m.linkMonitor)
}
}
}
func (m *MonitorFormatter) traceSockEvents(prefix string, data []byte) {
tn := monitor.TraceSockNotify{}
if err := binary.Read(bytes.NewReader(data), byteorder.Native, &tn); err != nil {
fmt.Printf("Error while parsing socket trace notification message: %s\n", err)
}
// Currently only printed with the debug option. Extend it to info and json.
// GH issue: https://github.com/cilium/cilium/issues/21510
if m.Verbosity == DEBUG {
tn.DumpDebug(prefix)
}
}
func (m *MonitorFormatter) policyVerdictEvents(prefix string, data []byte) {
pn := monitor.PolicyVerdictNotify{}
if err := binary.Read(bytes.NewReader(data), byteorder.Native, &pn); err != nil {
fmt.Printf("Error while parsing policy notification message: %s\n", err)
}
if m.match(monitorAPI.MessageTypePolicyVerdict, pn.Source, uint16(pn.RemoteLabel)) {
pn.DumpInfo(data, monitor.DisplayFormat(m.Numeric))
}
}
func (m *MonitorFormatter) recorderCaptureEvents(prefix string, data []byte) {
rc := monitor.RecorderCapture{}
if err := binary.Read(bytes.NewReader(data), byteorder.Native, &rc); err != nil {
fmt.Printf("Error while parsing capture record: %s\n", err)
}
if m.match(monitorAPI.MessageTypeRecCapture, 0, 0) {
rc.DumpInfo(data)
}
}
// debugEvents prints out all the debug messages.
func (m *MonitorFormatter) debugEvents(prefix string, data []byte) {
dm := monitor.DebugMsg{}
if err := binary.Read(bytes.NewReader(data), byteorder.Native, &dm); err != nil {
fmt.Printf("Error while parsing debug message: %s\n", err)
}
if m.match(monitorAPI.MessageTypeDebug, dm.Source, 0) {
switch m.Verbosity {
case INFO:
dm.DumpInfo(data)
case JSON:
dm.DumpJSON(prefix, m.linkMonitor)
default:
dm.Dump(prefix, m.linkMonitor)
}
}
}
// captureEvents prints out all the capture messages.
func (m *MonitorFormatter) captureEvents(prefix string, data []byte) {
dc := monitor.DebugCapture{}
if err := binary.Read(bytes.NewReader(data), byteorder.Native, &dc); err != nil {
fmt.Printf("Error while parsing debug capture message: %s\n", err)
}
if m.match(monitorAPI.MessageTypeCapture, dc.Source, 0) {
switch m.Verbosity {
case INFO, DEBUG:
dc.DumpInfo(data, m.linkMonitor)
case JSON:
dc.DumpJSON(data, prefix, m.linkMonitor)
default:
fmt.Println(msgSeparator)
dc.DumpVerbose(!m.Hex, data, prefix)
}
}
}
// logRecordEvents prints out LogRecord events
func (m *MonitorFormatter) logRecordEvents(prefix string, data []byte) {
buf := bytes.NewBuffer(data[1:])
dec := gob.NewDecoder(buf)
lr := monitor.LogRecordNotify{}
if err := dec.Decode(&lr); err != nil {
fmt.Printf("Error while decoding LogRecord notification message: %s\n", err)
}
if m.match(monitorAPI.MessageTypeAccessLog, uint16(lr.SourceEndpoint.ID), uint16(lr.DestinationEndpoint.ID)) {
if m.Verbosity == JSON {
lr.DumpJSON()
} else {
lr.DumpInfo()
}
}
}
// agentEvents prints out agent events
func (m *MonitorFormatter) agentEvents(prefix string, data []byte) {
buf := bytes.NewBuffer(data[1:])
dec := gob.NewDecoder(buf)
an := monitorAPI.AgentNotify{}
if err := dec.Decode(&an); err != nil {
fmt.Printf("Error while decoding agent notification message: %s\n", err)
}
if m.match(monitorAPI.MessageTypeAgent, 0, 0) {
if m.Verbosity == JSON {
an.DumpJSON()
} else {
an.DumpInfo()
}
}
}
// FormatSample prints an event from the provided raw data slice to stdout.
//
// For most monitor event types, 'data' corresponds to the 'data' field in
// bpf.PerfEventSample. Exceptions are MessageTypeAccessLog and
// MessageTypeAgent.
func (m *MonitorFormatter) FormatSample(data []byte, cpu int) {
prefix := fmt.Sprintf("CPU %02d:", cpu)
messageType := data[0]
switch messageType {
case monitorAPI.MessageTypeDrop:
m.dropEvents(prefix, data)
case monitorAPI.MessageTypeDebug:
m.debugEvents(prefix, data)
case monitorAPI.MessageTypeCapture:
m.captureEvents(prefix, data)
case monitorAPI.MessageTypeTrace:
m.traceEvents(prefix, data)
case monitorAPI.MessageTypeAccessLog:
m.logRecordEvents(prefix, data)
case monitorAPI.MessageTypeAgent:
m.agentEvents(prefix, data)
case monitorAPI.MessageTypePolicyVerdict:
m.policyVerdictEvents(prefix, data)
case monitorAPI.MessageTypeRecCapture:
m.recorderCaptureEvents(prefix, data)
case monitorAPI.MessageTypeTraceSock:
m.traceSockEvents(prefix, data)
default:
fmt.Printf("%s Unknown event: %+v\n", prefix, data)
}
}
// LostEvent formats a lost event using the specified payload parameters.
func LostEvent(lost uint64, cpu int) {
fmt.Printf("CPU %02d: Lost %d events\n", cpu, lost)
}
// FormatEvent formats an event from the specified payload to stdout.
//
// Returns true if the event was successfully printed, false otherwise.
func (m *MonitorFormatter) FormatEvent(pl *payload.Payload) bool {
switch pl.Type {
case payload.EventSample:
m.FormatSample(pl.Data, pl.CPU)
case payload.RecordLost:
LostEvent(pl.Lost, pl.CPU)
default:
return false
}
return true
}
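// Illustrative sketch: dispatching a decoded monitor payload to the formatter
// and reporting payload types FormatEvent does not recognize. Example helper
// only.
func exampleFormatPayload(mf *MonitorFormatter, pl *payload.Payload) {
if !mf.FormatEvent(pl) {
fmt.Printf("unhandled payload type: %+v\n", pl.Type)
}
}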
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
import (
"bytes"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"math"
"net"
"net/netip"
"os"
"path/filepath"
"regexp"
"runtime"
"slices"
"strconv"
"strings"
"unicode"
"unicode/utf8"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/mackerelio/go-osstat/memory"
"github.com/sirupsen/logrus"
"github.com/spf13/cast"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
k8sLabels "k8s.io/apimachinery/pkg/labels"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/cidr"
clustermeshTypes "github.com/cilium/cilium/pkg/clustermesh/types"
"github.com/cilium/cilium/pkg/command"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/ip"
ipamOption "github.com/cilium/cilium/pkg/ipam/option"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/mac"
"github.com/cilium/cilium/pkg/time"
"github.com/cilium/cilium/pkg/version"
)
var (
log = logging.DefaultLogger.WithField(logfields.LogSubsys, "config")
)
const (
// AgentHealthPort is the TCP port for agent health status API
AgentHealthPort = "agent-health-port"
// ClusterHealthPort is the TCP port for cluster-wide network connectivity health API
ClusterHealthPort = "cluster-health-port"
// ClusterMeshHealthPort is the TCP port for ClusterMesh apiserver health API
ClusterMeshHealthPort = "clustermesh-health-port"
// AgentLabels are additional labels to identify this agent
AgentLabels = "agent-labels"
// AllowICMPFragNeeded allows ICMP Fragmentation Needed type packets in policy.
AllowICMPFragNeeded = "allow-icmp-frag-needed"
// AllowLocalhost is the policy when to allow local stack to reach local endpoints { auto | always | policy }
AllowLocalhost = "allow-localhost"
// AllowLocalhostAuto defaults to policy except when running in
// Kubernetes where it then defaults to "always"
AllowLocalhostAuto = "auto"
// AllowLocalhostAlways always allows the local stack to reach local
// endpoints
AllowLocalhostAlways = "always"
// AllowLocalhostPolicy requires a policy rule to allow the local stack
// to reach particular endpoints or policy enforcement must be
// disabled.
AllowLocalhostPolicy = "policy"
// AnnotateK8sNode enables annotating a kubernetes node while bootstrapping
// the daemon, which can also be disabled using this option.
AnnotateK8sNode = "annotate-k8s-node"
// ARPPingRefreshPeriod is the ARP entries refresher period
ARPPingRefreshPeriod = "arping-refresh-period"
// EnableL2NeighDiscovery determines if cilium should perform L2 neighbor
// discovery.
EnableL2NeighDiscovery = "enable-l2-neigh-discovery"
// BPFRoot is the Path to BPF filesystem
BPFRoot = "bpf-root"
// CGroupRoot is the path to Cgroup2 filesystem
CGroupRoot = "cgroup-root"
// CompilerFlags allows specifying extra compiler commands for advanced debugging
CompilerFlags = "cflags"
// ConfigFile is the Configuration file (default "$HOME/ciliumd.yaml")
ConfigFile = "config"
// ConfigDir is the directory that contains a file for each option where
// the filename represents the option name and the content of that file
// represents the value of that option.
ConfigDir = "config-dir"
// ConntrackGCInterval is the name of the ConntrackGCInterval option
ConntrackGCInterval = "conntrack-gc-interval"
// ConntrackGCMaxInterval is the name of the ConntrackGCMaxInterval option
ConntrackGCMaxInterval = "conntrack-gc-max-interval"
// DebugArg is the argument that enables debugging mode
DebugArg = "debug"
// DebugVerbose is the argument that enables verbose log messages for particular subsystems
DebugVerbose = "debug-verbose"
// Devices facing cluster/external network for attaching bpf_host
Devices = "devices"
// Forces the auto-detection of devices, even if specific devices are explicitly listed
ForceDeviceDetection = "force-device-detection"
// DirectRoutingDevice is the name of a device used to connect nodes in
// direct routing mode (only required by BPF NodePort)
DirectRoutingDevice = "direct-routing-device"
// EnablePolicy enables policy enforcement in the agent.
EnablePolicy = "enable-policy"
// EnableExternalIPs enables implementation of k8s services with externalIPs in datapath
EnableExternalIPs = "enable-external-ips"
// EnableL7Proxy is the name of the option to enable L7 proxy
EnableL7Proxy = "enable-l7-proxy"
// EnableTracing enables tracing mode in the agent.
EnableTracing = "enable-tracing"
// EnableIPIPTermination is the name of the option to enable IPIP termination
EnableIPIPTermination = "enable-ipip-termination"
// Add unreachable routes on pod deletion
EnableUnreachableRoutes = "enable-unreachable-routes"
// EncryptInterface enables encryption on specified interface
EncryptInterface = "encrypt-interface"
// EncryptNode enables node IP encryption
EncryptNode = "encrypt-node"
// GopsPort is the TCP port for the gops server.
GopsPort = "gops-port"
// EnableGops run the gops server
EnableGops = "enable-gops"
// FixedIdentityMapping is the key-value for the fixed identity mapping
// which allows to use reserved label for fixed identities
FixedIdentityMapping = "fixed-identity-mapping"
// FixedZoneMapping is the key-value for the fixed zone mapping which
// is used to map zone value (string) from EndpointSlice to ID (uint8)
// in lb{4,6}_backend in BPF map.
FixedZoneMapping = "fixed-zone-mapping"
// IPv4Range is the per-node IPv4 endpoint prefix, e.g. 10.16.0.0/16
IPv4Range = "ipv4-range"
// IPv6Range is the per-node IPv6 endpoint prefix, must be /96, e.g. fd02:1:1::/96
IPv6Range = "ipv6-range"
// IPv4ServiceRange is the Kubernetes IPv4 services CIDR if not inside cluster prefix
IPv4ServiceRange = "ipv4-service-range"
// IPv6ServiceRange is the Kubernetes IPv6 services CIDR if not inside cluster prefix
IPv6ServiceRange = "ipv6-service-range"
// IPv6ClusterAllocCIDRName is the name of the IPv6ClusterAllocCIDR option
IPv6ClusterAllocCIDRName = "ipv6-cluster-alloc-cidr"
// K8sRequireIPv4PodCIDRName is the name of the K8sRequireIPv4PodCIDR option
K8sRequireIPv4PodCIDRName = "k8s-require-ipv4-pod-cidr"
// K8sRequireIPv6PodCIDRName is the name of the K8sRequireIPv6PodCIDR option
K8sRequireIPv6PodCIDRName = "k8s-require-ipv6-pod-cidr"
// K8sWatcherEndpointSelector specifies the k8s endpoints that Cilium
// should watch for.
K8sWatcherEndpointSelector = "k8s-watcher-endpoint-selector"
// EnableK8s enables operation of Kubernetes-related services/controllers.
// Intended for operating cilium with CNI-compatible orchestrators other than Kubernetes. (default is true)
EnableK8s = "enable-k8s"
// K8sAPIServer is the kubernetes api address server (for https use --k8s-kubeconfig-path instead)
K8sAPIServer = "k8s-api-server"
// K8sKubeConfigPath is the absolute path of the kubernetes kubeconfig file
K8sKubeConfigPath = "k8s-kubeconfig-path"
// K8sServiceCacheSize is service cache size for cilium k8s package.
K8sServiceCacheSize = "k8s-service-cache-size"
// K8sServiceDebounceBufferSize is the maximum number of service events to buffer.
K8sServiceDebounceBufferSize = "k8s-service-debounce-buffer-size"
// K8sServiceDebounceWaitTime is the amount of time to wait before emitting
// the service event buffer.
K8sServiceDebounceWaitTime = "k8s-service-debounce-wait-time"
// K8sSyncTimeout is the timeout since last event was received to synchronize all resources with k8s.
K8sSyncTimeoutName = "k8s-sync-timeout"
// AllocatorListTimeout is the timeout to list initial allocator state.
AllocatorListTimeoutName = "allocator-list-timeout"
// KeepConfig when restoring state, keeps containers' configuration in place
KeepConfig = "keep-config"
// KVStore key-value store type
KVStore = "kvstore"
// KVStoreOpt key-value store options
KVStoreOpt = "kvstore-opt"
// Labels is the list of label prefixes used to determine identity of an endpoint
Labels = "labels"
// LabelPrefixFile is the valid label prefixes file path
LabelPrefixFile = "label-prefix-file"
// EnableHostFirewall enables network policies for the host
EnableHostFirewall = "enable-host-firewall"
// EnableHostPort enables HostPort forwarding implemented by Cilium in BPF
EnableHostPort = "enable-host-port"
// EnableHostLegacyRouting enables the old routing path via stack.
EnableHostLegacyRouting = "enable-host-legacy-routing"
// EnableNodePort enables NodePort services implemented by Cilium in BPF
EnableNodePort = "enable-node-port"
// EnableSVCSourceRangeCheck enables checking of service source ranges
EnableSVCSourceRangeCheck = "enable-svc-source-range-check"
// NodePortMode indicates in which mode NodePort implementation should run
// ("snat", "dsr" or "hybrid")
NodePortMode = "node-port-mode"
// NodePortAlg indicates which algorithm is used for backend selection
// ("random" or "maglev")
NodePortAlg = "node-port-algorithm"
// NodePortAcceleration indicates whether NodePort should be accelerated
// via XDP ("none", "generic", "native", or "best-effort")
NodePortAcceleration = "node-port-acceleration"
// Alias to NodePortMode
LoadBalancerMode = "bpf-lb-mode"
// LoadBalancerModeAnnotation tells whether controller should check service
// level annotation for configuring bpf loadbalancing method (snat vs dsr).
LoadBalancerModeAnnotation = "bpf-lb-mode-annotation"
// Alias to DSR dispatch method
LoadBalancerDSRDispatch = "bpf-lb-dsr-dispatch"
// Alias to DSR/IPIP IPv4 source CIDR
LoadBalancerRSSv4CIDR = "bpf-lb-rss-ipv4-src-cidr"
// Alias to DSR/IPIP IPv6 source CIDR
LoadBalancerRSSv6CIDR = "bpf-lb-rss-ipv6-src-cidr"
// Alias to NodePortAlg
LoadBalancerAlgorithm = "bpf-lb-algorithm"
// LoadBalancerAlgorithmAnnotation tells whether controller should check service
// level annotation for configuring bpf loadbalancing algorithm.
LoadBalancerAlgorithmAnnotation = "bpf-lb-algorithm-annotation"
// Alias to NodePortAcceleration
LoadBalancerAcceleration = "bpf-lb-acceleration"
// LoadBalancerExternalControlPlane switch skips connectivity to kube-apiserver
// which is relevant in lb-only mode
LoadBalancerExternalControlPlane = "bpf-lb-external-control-plane"
// LoadBalancerProtocolDifferentiation enables support for service protocol differentiation (TCP, UDP, SCTP)
LoadBalancerProtocolDifferentiation = "bpf-lb-proto-diff"
// NodePortBindProtection rejects bind requests to NodePort service ports
NodePortBindProtection = "node-port-bind-protection"
// NodePortRange defines a custom range where to look up NodePort services
NodePortRange = "node-port-range"
// EnableAutoProtectNodePortRange enables appending NodePort range to
// net.ipv4.ip_local_reserved_ports if it overlaps with ephemeral port
// range (net.ipv4.ip_local_port_range)
EnableAutoProtectNodePortRange = "enable-auto-protect-node-port-range"
// KubeProxyReplacement controls how to enable kube-proxy replacement
// features in BPF datapath
KubeProxyReplacement = "kube-proxy-replacement"
// EnableSessionAffinity enables a support for service sessionAffinity
EnableSessionAffinity = "enable-session-affinity"
// EnableIdentityMark enables setting the mark field with the identity for
// local traffic. This may be disabled if chaining modes and Cilium use
// conflicting marks.
EnableIdentityMark = "enable-identity-mark"
// AddressScopeMax controls the maximum address scope for addresses to be
// considered local ones with HOST_ID in the ipcache
AddressScopeMax = "local-max-addr-scope"
// EnableRecorder enables the datapath pcap recorder
EnableRecorder = "enable-recorder"
// EnableLocalRedirectPolicy enables support for local redirect policy
EnableLocalRedirectPolicy = "enable-local-redirect-policy"
// EnableMKE enables MKE specific 'chaining' for kube-proxy replacement
EnableMKE = "enable-mke"
// CgroupPathMKE points to the cgroupv1 net_cls mount instance
CgroupPathMKE = "mke-cgroup-mount"
// LibDir is the directory path used to store the runtime build environment
LibDir = "lib-dir"
// LogDriver sets logging endpoints to use for example syslog, fluentd
LogDriver = "log-driver"
// LogOpt sets log driver options for cilium
LogOpt = "log-opt"
// EnableIPv4Masquerade masquerades IPv4 packets from endpoints leaving the host.
EnableIPv4Masquerade = "enable-ipv4-masquerade"
// EnableIPv6Masquerade masquerades IPv6 packets from endpoints leaving the host.
EnableIPv6Masquerade = "enable-ipv6-masquerade"
// EnableBPFClockProbe selects a more efficient source clock (jiffies vs ktime)
EnableBPFClockProbe = "enable-bpf-clock-probe"
// EnableBPFMasquerade masquerades packets from endpoints leaving the host with BPF instead of iptables
EnableBPFMasquerade = "enable-bpf-masquerade"
// EnableMasqueradeRouteSource masquerades to the source route IP address instead of the interface one
EnableMasqueradeRouteSource = "enable-masquerade-to-route-source"
// EnableIPMasqAgent enables BPF ip-masq-agent
EnableIPMasqAgent = "enable-ip-masq-agent"
// EnableIPv4EgressGateway enables the IPv4 egress gateway
EnableIPv4EgressGateway = "enable-ipv4-egress-gateway"
// EnableEnvoyConfig enables processing of CiliumClusterwideEnvoyConfig and CiliumEnvoyConfig CRDs
EnableEnvoyConfig = "enable-envoy-config"
// IPMasqAgentConfigPath is the configuration file path
IPMasqAgentConfigPath = "ip-masq-agent-config-path"
// InstallIptRules sets whether Cilium should install any iptables rules at all
InstallIptRules = "install-iptables-rules"
// InstallNoConntrackIptRules instructs Cilium to install Iptables rules
// to skip netfilter connection tracking on all pod traffic.
InstallNoConntrackIptRules = "install-no-conntrack-iptables-rules"
// ContainerIPLocalReservedPorts instructs the Cilium CNI plugin to reserve
// the provided comma-separated list of ports in the container network namespace
ContainerIPLocalReservedPorts = "container-ip-local-reserved-ports"
// IPv6NodeAddr is the IPv6 address of node
IPv6NodeAddr = "ipv6-node"
// IPv4NodeAddr is the IPv4 address of node
IPv4NodeAddr = "ipv4-node"
// Restore restores state, if possible, from previous daemon
Restore = "restore"
// SocketPath sets daemon's socket path to listen for connections
SocketPath = "socket-path"
// StateDir is the directory path to store runtime state
StateDir = "state-dir"
// TracePayloadlen is the length of the payload to capture when tracing
TracePayloadlen = "trace-payloadlen"
// Version prints the version information
Version = "version"
// EnableXDPPrefilter enables XDP-based prefiltering
EnableXDPPrefilter = "enable-xdp-prefilter"
// EnableTCX enables attaching endpoint programs using tcx if the kernel supports it
EnableTCX = "enable-tcx"
ProcFs = "procfs"
// PrometheusServeAddr IP:Port on which to serve prometheus metrics (pass ":Port" to bind on all interfaces, "" is off)
PrometheusServeAddr = "prometheus-serve-addr"
// ExternalEnvoyProxy defines whether the Envoy is deployed externally in form of a DaemonSet or not.
ExternalEnvoyProxy = "external-envoy-proxy"
// CMDRef is the path to cmdref output directory
CMDRef = "cmdref"
// DNSMaxIPsPerRestoredRule defines the maximum number of IPs to maintain
// for each FQDN selector in endpoint's restored DNS rules
DNSMaxIPsPerRestoredRule = "dns-max-ips-per-restored-rule"
// DNSPolicyUnloadOnShutdown is the name of the dns-policy-unload-on-shutdown option.
DNSPolicyUnloadOnShutdown = "dns-policy-unload-on-shutdown"
// ToFQDNsMinTTL is the minimum time, in seconds, to use DNS data for toFQDNs policies.
ToFQDNsMinTTL = "tofqdns-min-ttl"
// ToFQDNsProxyPort is the global port on which the in-agent DNS proxy should listen. The default of 0 means an OS-assigned port.
ToFQDNsProxyPort = "tofqdns-proxy-port"
// ToFQDNsMaxIPsPerHost defines the maximum number of IPs to maintain
// for each FQDN name in an endpoint's FQDN cache
ToFQDNsMaxIPsPerHost = "tofqdns-endpoint-max-ip-per-hostname"
// ToFQDNsMaxDeferredConnectionDeletes defines the maximum number of IPs to
// retain for expired DNS lookups with still-active connections"
ToFQDNsMaxDeferredConnectionDeletes = "tofqdns-max-deferred-connection-deletes"
// ToFQDNsIdleConnectionGracePeriod defines the connection idle time during which
// previously active connections with expired DNS lookups are still considered alive
ToFQDNsIdleConnectionGracePeriod = "tofqdns-idle-connection-grace-period"
// ToFQDNsPreCache is a path to a file with DNS cache data to insert into the
// global cache on startup.
// The file is not re-read after agent start.
ToFQDNsPreCache = "tofqdns-pre-cache"
// ToFQDNsEnableDNSCompression allows the DNS proxy to compress responses to
// endpoints when they are larger than 512 bytes, or than the maximum size advertised via the EDNS0 option, if present.
ToFQDNsEnableDNSCompression = "tofqdns-enable-dns-compression"
// DNSProxyConcurrencyLimit limits parallel processing of DNS messages in
// DNS proxy at any given point in time.
DNSProxyConcurrencyLimit = "dnsproxy-concurrency-limit"
// DNSProxyConcurrencyProcessingGracePeriod is the amount of grace time to
// wait while processing DNS messages when the DNSProxyConcurrencyLimit has
// been reached.
DNSProxyConcurrencyProcessingGracePeriod = "dnsproxy-concurrency-processing-grace-period"
// DNSProxyLockCount is the array size containing mutexes which protect
// against parallel handling of DNS response IPs.
DNSProxyLockCount = "dnsproxy-lock-count"
// DNSProxyLockTimeout is timeout when acquiring the locks controlled by
// DNSProxyLockCount.
DNSProxyLockTimeout = "dnsproxy-lock-timeout"
// DNSProxySocketLingerTimeout defines how many seconds we wait for the connection
// between the DNS proxy and the upstream server to be closed.
DNSProxySocketLingerTimeout = "dnsproxy-socket-linger-timeout"
// DNSProxyEnableTransparentMode enables transparent mode for the DNS proxy.
DNSProxyEnableTransparentMode = "dnsproxy-enable-transparent-mode"
// DNSProxyInsecureSkipTransparentModeCheck is a hidden flag that allows users
// to disable transparent mode even if IPSec is enabled
DNSProxyInsecureSkipTransparentModeCheck = "dnsproxy-insecure-skip-transparent-mode-check"
// MTUName is the name of the MTU option
MTUName = "mtu"
// RouteMetric is the name of the route-metric option
RouteMetric = "route-metric"
// DatapathMode is the name of the DatapathMode option
DatapathMode = "datapath-mode"
// EnableSocketLB is the name for the option to enable the socket LB
EnableSocketLB = "bpf-lb-sock"
// EnableSocketLBTracing is the name for the option to enable the socket LB tracing
EnableSocketLBTracing = "trace-sock"
// BPFSocketLBHostnsOnly is the name of the BPFSocketLBHostnsOnly option
BPFSocketLBHostnsOnly = "bpf-lb-sock-hostns-only"
// EnableSocketLBPodConnectionTermination enables termination of pod connections
// to deleted service backends when socket-LB is enabled.
EnableSocketLBPodConnectionTermination = "bpf-lb-sock-terminate-pod-connections"
// RoutingMode is the name of the option to choose between native routing and tunneling mode
RoutingMode = "routing-mode"
// ServiceNoBackendResponse is the name of the option to pick how to handle traffic for services
// without any backends
ServiceNoBackendResponse = "service-no-backend-response"
// ServiceNoBackendResponseReject is the name of the option to reject traffic for services
// without any backends
ServiceNoBackendResponseReject = "reject"
// ServiceNoBackendResponseDrop is the name of the option to drop traffic for services
// without any backends
ServiceNoBackendResponseDrop = "drop"
// MaxInternalTimerDelay sets a maximum on all periodic timers in
// the agent in order to flush out timer-related bugs in the agent.
MaxInternalTimerDelay = "max-internal-timer-delay"
// MonitorAggregationName specifies the MonitorAggregationLevel on the
// command line.
MonitorAggregationName = "monitor-aggregation"
// MonitorAggregationInterval configures interval for monitor-aggregation
MonitorAggregationInterval = "monitor-aggregation-interval"
// MonitorAggregationFlags configures TCP flags used by monitor aggregation.
MonitorAggregationFlags = "monitor-aggregation-flags"
// ciliumEnvPrefix is the prefix used for environment variables
ciliumEnvPrefix = "CILIUM_"
// CNIChainingMode configures which CNI plugin Cilium is chained with.
CNIChainingMode = "cni-chaining-mode"
// CNIChainingTarget is the name of a CNI network in to which we should
// insert our plugin configuration
CNIChainingTarget = "cni-chaining-target"
// AuthMapEntriesMin defines the minimum auth map limit.
AuthMapEntriesMin = 1 << 8
// AuthMapEntriesMax defines the maximum auth map limit.
AuthMapEntriesMax = 1 << 24
// AuthMapEntriesDefault defines the default auth map limit.
AuthMapEntriesDefault = 1 << 19
// BPFConntrackAccountingDefault is the default value for whether CT accounting for packets and bytes is enabled
BPFConntrackAccountingDefault = false
// AuthMapEntriesName configures max entries for BPF auth map.
AuthMapEntriesName = "bpf-auth-map-max"
// CTMapEntriesGlobalTCPDefault is the default maximum number of entries
// in the TCP CT table.
CTMapEntriesGlobalTCPDefault = 2 << 18 // 512Ki
// CTMapEntriesGlobalAnyDefault is the default maximum number of entries
// in the non-TCP CT table.
CTMapEntriesGlobalAnyDefault = 2 << 17 // 256Ki
// CTMapEntriesGlobalTCPName configures max entries for the TCP CT
// table.
CTMapEntriesGlobalTCPName = "bpf-ct-global-tcp-max"
// CTMapEntriesGlobalAnyName configures max entries for the non-TCP CT
// table.
CTMapEntriesGlobalAnyName = "bpf-ct-global-any-max"
// CTMapEntriesTimeout* name option and default value mappings
CTMapEntriesTimeoutSYNName = "bpf-ct-timeout-regular-tcp-syn"
CTMapEntriesTimeoutFINName = "bpf-ct-timeout-regular-tcp-fin"
CTMapEntriesTimeoutTCPName = "bpf-ct-timeout-regular-tcp"
CTMapEntriesTimeoutAnyName = "bpf-ct-timeout-regular-any"
CTMapEntriesTimeoutSVCTCPName = "bpf-ct-timeout-service-tcp"
CTMapEntriesTimeoutSVCTCPGraceName = "bpf-ct-timeout-service-tcp-grace"
CTMapEntriesTimeoutSVCAnyName = "bpf-ct-timeout-service-any"
// NATMapEntriesGlobalDefault holds the default size of the NAT map
// and is 2/3 of the full CT size as a heuristic
NATMapEntriesGlobalDefault = int((CTMapEntriesGlobalTCPDefault + CTMapEntriesGlobalAnyDefault) * 2 / 3)
// SockRevNATMapEntriesDefault holds the default size of the SockRev NAT map
// and is the same size of CTMapEntriesGlobalAnyDefault as a heuristic given
// that sock rev NAT is mostly used for UDP and getpeername only.
SockRevNATMapEntriesDefault = CTMapEntriesGlobalAnyDefault
// MapEntriesGlobalDynamicSizeRatioName is the name of the option to
// set the ratio of total system memory to use for dynamic sizing of the
// CT, NAT, Neighbor and SockRevNAT BPF maps.
MapEntriesGlobalDynamicSizeRatioName = "bpf-map-dynamic-size-ratio"
// LimitTableAutoGlobalTCPMin defines the minimum TCP CT table limit for
// dynamic size ratio calculation.
LimitTableAutoGlobalTCPMin = 1 << 17 // 128Ki entries
// LimitTableAutoGlobalAnyMin defines the minimum UDP CT table limit for
// dynamic size ratio calculation.
LimitTableAutoGlobalAnyMin = 1 << 16 // 64Ki entries
// LimitTableAutoNatGlobalMin defines the minimum NAT limit for dynamic size
// ratio calculation.
LimitTableAutoNatGlobalMin = 1 << 17 // 128Ki entries
// LimitTableAutoSockRevNatMin defines the minimum SockRevNAT limit for
// dynamic size ratio calculation.
LimitTableAutoSockRevNatMin = 1 << 16 // 64Ki entries
// LimitTableMin defines the minimum CT or NAT table limit
LimitTableMin = 1 << 10 // 1Ki entries
// LimitTableMax defines the maximum CT or NAT table limit
LimitTableMax = 1 << 24 // 16Mi entries (~1GiB of entries per map)
// PolicyMapMin defines the minimum policy map limit.
PolicyMapMin = 1 << 8
// PolicyMapMax defines the maximum policy map limit.
PolicyMapMax = 1 << 16
// FragmentsMapMin defines the minimum fragments map limit.
FragmentsMapMin = 1 << 8
// FragmentsMapMax defines the maximum fragments map limit.
FragmentsMapMax = 1 << 16
// NATMapEntriesGlobalName configures max entries for BPF NAT table
NATMapEntriesGlobalName = "bpf-nat-global-max"
// NeighMapEntriesGlobalName configures max entries for BPF neighbor table
NeighMapEntriesGlobalName = "bpf-neigh-global-max"
// PolicyMapEntriesName configures max entries for BPF policymap.
PolicyMapEntriesName = "bpf-policy-map-max"
// PolicyMapFullReconciliationInterval sets the interval for performing the full
// reconciliation of the endpoint policy map.
PolicyMapFullReconciliationIntervalName = "bpf-policy-map-full-reconciliation-interval"
// SockRevNatEntriesName configures max entries for BPF sock reverse nat
// entries.
SockRevNatEntriesName = "bpf-sock-rev-map-max"
// EgressGatewayPolicyMapEntriesName configures max entries for egress gateway's policy
// map.
EgressGatewayPolicyMapEntriesName = "egress-gateway-policy-map-max"
// LogSystemLoadConfigName is the name of the option to enable system
// load logging
LogSystemLoadConfigName = "log-system-load"
// DisableCiliumEndpointCRDName is the name of the option to disable
// use of the CEP CRD
DisableCiliumEndpointCRDName = "disable-endpoint-crd"
// MaxCtrlIntervalName and MaxCtrlIntervalNameEnv allow configuration
// of MaxControllerInterval.
MaxCtrlIntervalName = "max-controller-interval"
// K8sNamespaceName is the name of the K8sNamespace option
K8sNamespaceName = "k8s-namespace"
// AgentNotReadyNodeTaintKeyName is the name of the option to set
// AgentNotReadyNodeTaintKey
AgentNotReadyNodeTaintKeyName = "agent-not-ready-taint-key"
// JoinClusterName is the name of the JoinCluster Option
JoinClusterName = "join-cluster"
// EnableIPv4Name is the name of the option to enable IPv4 support
EnableIPv4Name = "enable-ipv4"
// EnableIPv6Name is the name of the option to enable IPv6 support
EnableIPv6Name = "enable-ipv6"
// EnableIPv6NDPName is the name of the option to enable IPv6 NDP support
EnableIPv6NDPName = "enable-ipv6-ndp"
// EnableSRv6 is the name of the option to enable SRv6 encapsulation support
EnableSRv6 = "enable-srv6"
// SRv6EncapModeName is the name of the option to specify the SRv6 encapsulation mode
SRv6EncapModeName = "srv6-encap-mode"
// EnableSCTPName is the name of the option to enable SCTP support
EnableSCTPName = "enable-sctp"
// EnableNat46X64Gateway enables L3 based NAT46 and NAT64 gateway
EnableNat46X64Gateway = "enable-nat46x64-gateway"
// IPv6MCastDevice is the name of the option to select IPv6 multicast device
IPv6MCastDevice = "ipv6-mcast-device"
// BPFEventsDefaultRateLimit specifies limit of messages per second that can be written to
// BPF events map. This limit is defined for all types of events except dbg and pcap.
// The number of messages is averaged, meaning that if no messages were written
// to the map over 5 seconds, it's possible to write more events than the value of rate limit
// in the 6th second.
//
// If BPFEventsDefaultRateLimit > 0, non-zero value for BPFEventsDefaultBurstLimit must also be provided
// lest the configuration is considered invalid.
// If both rate and burst limit are 0 or not specified, no limit is imposed.
BPFEventsDefaultRateLimit = "bpf-events-default-rate-limit"
// BPFEventsDefaultBurstLimit specifies the maximum number of messages that can be written
// to BPF events map in 1 second. This limit is defined for all types of events except dbg and pcap.
//
// If BPFEventsDefaultBurstLimit > 0, non-zero value for BPFEventsDefaultRateLimit must also be provided
// lest the configuration is considered invalid.
// If both burst and rate limit are 0 or not specified, no limit is imposed.
BPFEventsDefaultBurstLimit = "bpf-events-default-burst-limit"
// FQDNRejectResponseCode is the name for the option for dns-proxy reject response code
FQDNRejectResponseCode = "tofqdns-dns-reject-response-code"
// FQDNProxyDenyWithNameError is useful when stub resolvers, like the one
// in Alpine Linux's libc (musl), treat a REFUSED as a resolution error.
// This happens when trying a DNS search list, as in kubernetes, and breaks
// even whitelisted DNS names.
FQDNProxyDenyWithNameError = "nameError"
// FQDNProxyDenyWithRefused is the response code for Domain refused. It is
// the default for denied DNS requests.
FQDNProxyDenyWithRefused = "refused"
// FQDNProxyResponseMaxDelay is the maximum time the proxy holds back a response
FQDNProxyResponseMaxDelay = "tofqdns-proxy-response-max-delay"
// FQDNRegexCompileLRUSize is the size of the FQDN regex compilation LRU.
// Useful for heavy but repeated FQDN MatchName or MatchPattern use.
FQDNRegexCompileLRUSize = "fqdn-regex-compile-lru-size"
// PreAllocateMapsName is the name of the option PreAllocateMaps
PreAllocateMapsName = "preallocate-bpf-maps"
// EnableBPFTProxy option supports enabling or disabling BPF TProxy.
EnableBPFTProxy = "enable-bpf-tproxy"
// EnableAutoDirectRoutingName is the name for the EnableAutoDirectRouting option
EnableAutoDirectRoutingName = "auto-direct-node-routes"
// DirectRoutingSkipUnreachableName is the name for the DirectRoutingSkipUnreachable option
DirectRoutingSkipUnreachableName = "direct-routing-skip-unreachable"
// EnableIPSecName is the name of the option to enable IPSec
EnableIPSecName = "enable-ipsec"
// Duration of the IPsec key rotation. After that time, we will clean the
// previous IPsec key from the node.
IPsecKeyRotationDuration = "ipsec-key-rotation-duration"
// Enable watcher for IPsec key. If disabled, a restart of the agent will
// be necessary on key rotations.
EnableIPsecKeyWatcher = "enable-ipsec-key-watcher"
// Enable caching for XfrmState for IPSec. Significantly reduces CPU usage
// in large clusters.
EnableIPSecXfrmStateCaching = "enable-ipsec-xfrm-state-caching"
// IPSecKeyFileName is the name of the option for ipsec key file
IPSecKeyFileName = "ipsec-key-file"
// EnableIPSecEncryptedOverlay is the name of the option which enables
// the EncryptedOverlay feature.
//
// This feature will encrypt overlay traffic before it leaves the cluster.
EnableIPSecEncryptedOverlay = "enable-ipsec-encrypted-overlay"
// BootIDFilename is a hidden flag that allows users to specify a
// filename other than /proc/sys/kernel/random/boot_id. This can be
// useful for testing purposes in a local containerized cluster.
BootIDFilename = "boot-id-file"
// EnableWireguard is the name of the option to enable WireGuard
EnableWireguard = "enable-wireguard"
// WireguardTrackAllIPsFallback forces the WireGuard agent to track all IPs.
WireguardTrackAllIPsFallback = "wireguard-track-all-ips-fallback"
// EnableL2Announcements is the name of the option to enable l2 announcements
EnableL2Announcements = "enable-l2-announcements"
// L2AnnouncerLeaseDuration is the lease duration: if a lease has not been renewed for this amount of time, a new leader can be chosen.
L2AnnouncerLeaseDuration = "l2-announcements-lease-duration"
// L2AnnouncerRenewDeadline is the interval at which the leader renews the lease.
L2AnnouncerRenewDeadline = "l2-announcements-renew-deadline"
// L2AnnouncerRetryPeriod is the amount of time to wait before retrying after a renew failure.
L2AnnouncerRetryPeriod = "l2-announcements-retry-period"
// EnableEncryptionStrictMode is the name of the option to enable strict encryption mode.
EnableEncryptionStrictMode = "enable-encryption-strict-mode"
// EncryptionStrictModeCIDR is the CIDR in which the strict encryption mode should be enforced.
EncryptionStrictModeCIDR = "encryption-strict-mode-cidr"
// EncryptionStrictModeAllowRemoteNodeIdentities allows dynamic lookup of remote node identities.
// This is required when tunneling is used
// or direct routing is used and the node CIDR and pod CIDR overlap.
EncryptionStrictModeAllowRemoteNodeIdentities = "encryption-strict-mode-allow-remote-node-identities"
// WireguardPersistentKeepalive controls the WireGuard PersistentKeepalive option. Set to 0 to disable.
WireguardPersistentKeepalive = "wireguard-persistent-keepalive"
// NodeEncryptionOptOutLabels is the name of the option for the node-to-node encryption opt-out labels
NodeEncryptionOptOutLabels = "node-encryption-opt-out-labels"
// KVstoreLeaseTTL is the time-to-live for lease in kvstore.
KVstoreLeaseTTL = "kvstore-lease-ttl"
// KVstoreMaxConsecutiveQuorumErrorsName is the maximum number of acceptable
// kvstore consecutive quorum errors before the agent assumes permanent failure
KVstoreMaxConsecutiveQuorumErrorsName = "kvstore-max-consecutive-quorum-errors"
// KVstorePeriodicSync is the time interval in which periodic
// synchronization with the kvstore occurs
KVstorePeriodicSync = "kvstore-periodic-sync"
// KVstoreConnectivityTimeout is the timeout when performing kvstore operations
KVstoreConnectivityTimeout = "kvstore-connectivity-timeout"
// KVstorePodNetworkSupport enables the support for running the Cilium KVstore
// in pod network.
KVstorePodNetworkSupport = "kvstore-pod-network-support"
// IdentityChangeGracePeriod is the name of the
// IdentityChangeGracePeriod option
IdentityChangeGracePeriod = "identity-change-grace-period"
// IdentityRestoreGracePeriod is the name of the
// IdentityRestoreGracePeriod option
IdentityRestoreGracePeriod = "identity-restore-grace-period"
// EnableHealthChecking is the name of the EnableHealthChecking option
EnableHealthChecking = "enable-health-checking"
// EnableEndpointHealthChecking is the name of the EnableEndpointHealthChecking option
EnableEndpointHealthChecking = "enable-endpoint-health-checking"
// EnableHealthCheckNodePort is the name of the EnableHealthCheckNodePort option
EnableHealthCheckNodePort = "enable-health-check-nodeport"
// EnableHealthCheckLoadBalancerIP is the name of the EnableHealthCheckLoadBalancerIP option
EnableHealthCheckLoadBalancerIP = "enable-health-check-loadbalancer-ip"
// HealthCheckICMPFailureThreshold is the name of the HealthCheckICMPFailureThreshold option
HealthCheckICMPFailureThreshold = "health-check-icmp-failure-threshold"
// EndpointQueueSize is the size of the EventQueue per-endpoint.
EndpointQueueSize = "endpoint-queue-size"
// EndpointGCInterval is the interval at which to attempt garbage collection
// of endpoints that are no longer alive and healthy.
EndpointGCInterval = "endpoint-gc-interval"
// EndpointRegenInterval is the interval of the periodic endpoint regeneration loop.
EndpointRegenInterval = "endpoint-regen-interval"
// LoopbackIPv4 is the address to use for service loopback SNAT
LoopbackIPv4 = "ipv4-service-loopback-address"
// LocalRouterIPv4 is the link-local IPv4 address to use for Cilium router device
LocalRouterIPv4 = "local-router-ipv4"
// LocalRouterIPv6 is the link-local IPv6 address to use for Cilium router device
LocalRouterIPv6 = "local-router-ipv6"
// EnableEndpointRoutes enables use of per endpoint routes
EnableEndpointRoutes = "enable-endpoint-routes"
// ExcludeLocalAddress excludes certain addresses from being recognized as
// local addresses
ExcludeLocalAddress = "exclude-local-address"
// IPv4PodSubnets is a list of IPv4 subnets that pods may be
// assigned from. Used with CNI chaining where IPs are not directly managed
// by Cilium.
IPv4PodSubnets = "ipv4-pod-subnets"
// IPv6PodSubnets is a list of IPv6 subnets that pods may be
// assigned from. Used with CNI chaining where IPs are not directly managed
// by Cilium.
IPv6PodSubnets = "ipv6-pod-subnets"
// IPAM is the IPAM method to use
IPAM = "ipam"
// IPAMMultiPoolPreAllocation defines the pre-allocation value for each IPAM pool
IPAMMultiPoolPreAllocation = "ipam-multi-pool-pre-allocation"
// IPAMDefaultIPPool defines the default IP Pool when using multi-pool
IPAMDefaultIPPool = "ipam-default-ip-pool"
// XDPModeNative for loading progs with XDPModeLinkDriver
XDPModeNative = "native"
// XDPModeBestEffort for loading progs with XDPModeLinkDriver
XDPModeBestEffort = "best-effort"
// XDPModeGeneric for loading progs with XDPModeLinkGeneric
XDPModeGeneric = "testing-only"
// XDPModeDisabled for not having XDP enabled
XDPModeDisabled = "disabled"
// XDPModeLinkDriver is the tc selector for native XDP
XDPModeLinkDriver = "xdpdrv"
// XDPModeLinkGeneric is the tc selector for generic XDP
XDPModeLinkGeneric = "xdpgeneric"
// XDPModeLinkNone for not having XDP enabled
XDPModeLinkNone = XDPModeDisabled
// K8sClientQPSLimit is the queries per second limit for the K8s client. Defaults to k8s client defaults.
K8sClientQPSLimit = "k8s-client-qps"
// K8sClientBurst is the burst value allowed for the K8s client. Defaults to k8s client defaults.
K8sClientBurst = "k8s-client-burst"
// AutoCreateCiliumNodeResource enables automatic creation of a
// CiliumNode resource for the local node
AutoCreateCiliumNodeResource = "auto-create-cilium-node-resource"
// ExcludeNodeLabelPatterns allows for excluding unnecessary labels from being propagated from k8s node to cilium
// node object. This allows for avoiding unnecessary events being broadcast to all nodes in the cluster.
ExcludeNodeLabelPatterns = "exclude-node-label-patterns"
// IPv4NativeRoutingCIDR describes a v4 CIDR in which pod IPs are routable
IPv4NativeRoutingCIDR = "ipv4-native-routing-cidr"
// IPv6NativeRoutingCIDR describes a v6 CIDR in which pod IPs are routable
IPv6NativeRoutingCIDR = "ipv6-native-routing-cidr"
// MasqueradeInterfaces is the selector used to select interfaces subject to
// egress masquerading
MasqueradeInterfaces = "egress-masquerade-interfaces"
// PolicyTriggerInterval is the amount of time between when policy
// updates are triggered.
PolicyTriggerInterval = "policy-trigger-interval"
// IdentityAllocationMode specifies what mode to use for identity
// allocation
IdentityAllocationMode = "identity-allocation-mode"
// IdentityAllocationModeKVstore enables use of a key-value store such
// as etcd for identity allocation
IdentityAllocationModeKVstore = "kvstore"
// IdentityAllocationModeCRD enables use of Kubernetes CRDs for
// identity allocation
IdentityAllocationModeCRD = "crd"
// IdentityAllocationModeDoubleWriteReadKVstore writes identities to the KVStore and as CRDs at the same time.
// Identities are then read from the KVStore.
IdentityAllocationModeDoubleWriteReadKVstore = "doublewrite-readkvstore"
// IdentityAllocationModeDoubleWriteReadCRD writes identities to the KVStore and as CRDs at the same time.
// Identities are then read from the CRDs.
IdentityAllocationModeDoubleWriteReadCRD = "doublewrite-readcrd"
// EnableLocalNodeRoute controls installation of the route which points
// to the allocation prefix of the local node.
EnableLocalNodeRoute = "enable-local-node-route"
// PolicyAuditModeArg argument enables policy audit mode.
PolicyAuditModeArg = "policy-audit-mode"
// PolicyAccountingArg argument enables policy accounting.
PolicyAccountingArg = "policy-accounting"
// K8sClientConnectionTimeout configures the timeout for K8s client connections.
K8sClientConnectionTimeout = "k8s-client-connection-timeout"
// K8sClientConnectionKeepAlive configures the keep alive duration for K8s client connections.
K8sClientConnectionKeepAlive = "k8s-client-connection-keep-alive"
// K8sHeartbeatTimeout configures the timeout for apiserver heartbeat
K8sHeartbeatTimeout = "k8s-heartbeat-timeout"
// EnableIPv4FragmentsTrackingName is the name of the option to enable
// IPv4 fragments tracking for L4-based lookups. Needs LRU map support.
EnableIPv4FragmentsTrackingName = "enable-ipv4-fragment-tracking"
// FragmentsMapEntriesName configures max entries for BPF fragments
// tracking map.
FragmentsMapEntriesName = "bpf-fragments-map-max"
// K8sEnableAPIDiscovery enables Kubernetes API discovery
K8sEnableAPIDiscovery = "enable-k8s-api-discovery"
// LBMapEntriesName configures max entries for BPF lbmap.
LBMapEntriesName = "bpf-lb-map-max"
// LBServiceMapMaxEntries configures max entries of bpf map for services.
LBServiceMapMaxEntries = "bpf-lb-service-map-max"
// LBBackendMapMaxEntries configures max entries of bpf map for service backends.
LBBackendMapMaxEntries = "bpf-lb-service-backend-map-max"
// LBRevNatMapMaxEntries configures max entries of bpf map for reverse NAT.
LBRevNatMapMaxEntries = "bpf-lb-rev-nat-map-max"
// LBAffinityMapMaxEntries configures max entries of bpf map for session affinity.
LBAffinityMapMaxEntries = "bpf-lb-affinity-map-max"
// LBSourceRangeAllTypes configures service source ranges for all service types.
LBSourceRangeAllTypes = "bpf-lb-source-range-all-types"
// LBSourceRangeMapMaxEntries configures max entries of bpf map for service source ranges.
LBSourceRangeMapMaxEntries = "bpf-lb-source-range-map-max"
// LBMaglevMapMaxEntries configures max entries of bpf map for Maglev.
LBMaglevMapMaxEntries = "bpf-lb-maglev-map-max"
// CRDWaitTimeout is the timeout in which Cilium will exit if CRDs are not
// available.
CRDWaitTimeout = "crd-wait-timeout"
// EgressMultiHomeIPRuleCompat instructs Cilium, if false, to use a new scheme
// to store rules and routes under ENI and Azure IPAM modes.
// Otherwise, it will use the old scheme.
EgressMultiHomeIPRuleCompat = "egress-multi-home-ip-rule-compat"
// InstallUplinkRoutesForDelegatedIPAM installs ingress/egress routes through the uplink
// on the host for Pods when working with a delegated IPAM plugin.
InstallUplinkRoutesForDelegatedIPAM = "install-uplink-routes-for-delegated-ipam"
// EnableCustomCallsName is the name of the option to enable tail calls
// for user-defined custom eBPF programs.
EnableCustomCallsName = "enable-custom-calls"
// BGPSecretsNamespace is the Kubernetes namespace to get BGP control plane secrets from.
BGPSecretsNamespace = "bgp-secrets-namespace"
// ExternalClusterIPName is the name of the option to enable
// cluster external access to ClusterIP services.
ExternalClusterIPName = "bpf-lb-external-clusterip"
// VLANBPFBypass instructs Cilium to bypass BPF logic for VLAN-tagged packets
VLANBPFBypass = "vlan-bpf-bypass"
// DisableExternalIPMitigation disables the ExternalIP mitigation (CVE-2020-8554)
DisableExternalIPMitigation = "disable-external-ip-mitigation"
// EnableICMPRules enables ICMP-based rule support for Cilium Network Policies.
EnableICMPRules = "enable-icmp-rules"
// Use the CiliumInternalIPs (vs. NodeInternalIPs) for IPsec encapsulation.
UseCiliumInternalIPForIPsec = "use-cilium-internal-ip-for-ipsec"
// BypassIPAvailabilityUponRestore bypasses the IP availability error
// within IPAM upon endpoint restore and allows the use of the restored IP
// regardless of whether it's available in the pool.
BypassIPAvailabilityUponRestore = "bypass-ip-availability-upon-restore"
// EnableK8sTerminatingEndpoint enables the option to auto detect terminating
// state for endpoints in order to support graceful termination.
EnableK8sTerminatingEndpoint = "enable-k8s-terminating-endpoint"
// EnableVTEP enables Cilium VXLAN VTEP integration
EnableVTEP = "enable-vtep"
// VTEP endpoint IPs
VtepEndpoint = "vtep-endpoint"
// VTEP CIDRs
VtepCIDR = "vtep-cidr"
// VTEP CIDR Mask applies to all VtepCIDR
VtepMask = "vtep-mask"
// VTEP MACs
VtepMAC = "vtep-mac"
// TCFilterPriority sets the priority of the cilium tc filter, enabling other
// filters to be inserted prior to the cilium filter.
TCFilterPriority = "bpf-filter-priority"
// Flag to enable BGP control plane features
EnableBGPControlPlane = "enable-bgp-control-plane"
// EnableBGPControlPlaneStatusReport enables BGP Control Plane CRD status reporting
EnableBGPControlPlaneStatusReport = "enable-bgp-control-plane-status-report"
// BGPRouterIDAllocationMode is the BGP router-id allocation mode in an IPv6-standalone environment
BGPRouterIDAllocationMode = "bgp-router-id-allocation-mode"
// EnableRuntimeDeviceDetection is the name of the option to enable detection
// of new and removed datapath devices during the agent runtime.
EnableRuntimeDeviceDetection = "enable-runtime-device-detection"
// EnablePMTUDiscovery enables path MTU discovery to send ICMP
// fragmentation-needed replies to the client (when needed).
EnablePMTUDiscovery = "enable-pmtu-discovery"
// BPFMapEventBuffers specifies which maps should have event buffers enabled,
// and what the max size and TTL of events in the buffers should be.
BPFMapEventBuffers = "bpf-map-event-buffers"
// IPAMCiliumNodeUpdateRate is the maximum rate at which the CiliumNode custom
// resource is updated.
IPAMCiliumNodeUpdateRate = "ipam-cilium-node-update-rate"
// EnableK8sNetworkPolicy enables support for K8s NetworkPolicy.
EnableK8sNetworkPolicy = "enable-k8s-networkpolicy"
// EnableCiliumNetworkPolicy enables support for Cilium Network Policy.
EnableCiliumNetworkPolicy = "enable-cilium-network-policy"
// EnableCiliumClusterwideNetworkPolicy enables support for Cilium Clusterwide
// Network Policy.
EnableCiliumClusterwideNetworkPolicy = "enable-cilium-clusterwide-network-policy"
// PolicyCIDRMatchMode defines the entities that CIDR selectors can reach
PolicyCIDRMatchMode = "policy-cidr-match-mode"
// EnableNodeSelectorLabels enables use of the node label based identity
EnableNodeSelectorLabels = "enable-node-selector-labels"
// NodeLabels is the list of label prefixes used to determine identity of a node (requires enabling of
// EnableNodeSelectorLabels)
NodeLabels = "node-labels"
// BPFEventsDropEnabled defines the DropNotification setting for any endpoint
BPFEventsDropEnabled = "bpf-events-drop-enabled"
// BPFEventsPolicyVerdictEnabled defines the PolicyVerdictNotification setting for any endpoint
BPFEventsPolicyVerdictEnabled = "bpf-events-policy-verdict-enabled"
// BPFEventsTraceEnabled defines the TraceNotification setting for any endpoint
BPFEventsTraceEnabled = "bpf-events-trace-enabled"
// BPFConntrackAccounting controls whether CT accounting for packets and bytes is enabled
BPFConntrackAccounting = "bpf-conntrack-accounting"
// EnableInternalTrafficPolicy enables handling routing for services with internalTrafficPolicy configured
EnableInternalTrafficPolicy = "enable-internal-traffic-policy"
// EnableNonDefaultDenyPolicies allows policies to define whether they are operating in default-deny mode
EnableNonDefaultDenyPolicies = "enable-non-default-deny-policies"
// EnableEndpointLockdownOnPolicyOverflow enables endpoint lockdown when an endpoint's
// policy map overflows.
EnableEndpointLockdownOnPolicyOverflow = "enable-endpoint-lockdown-on-policy-overflow"
)
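// validateBPFEventsRateLimitPair is a hypothetical sketch (not part of the
// upstream code) of the pairing rule documented on BPFEventsDefaultRateLimit
// and BPFEventsDefaultBurstLimit above: either both limits are zero (meaning
// no limit is imposed) or both must be non-zero, otherwise the configuration
// is considered invalid.
func validateBPFEventsRateLimitPair(rate, burst uint32) error {
	// Exactly one of the two being zero is the invalid combination.
	if (rate == 0) != (burst == 0) {
		return fmt.Errorf("invalid configuration: %s and %s must either both be zero or both be non-zero",
			BPFEventsDefaultRateLimit, BPFEventsDefaultBurstLimit)
	}
	return nil
}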
// Default string arguments
var (
FQDNRejectOptions = []string{FQDNProxyDenyWithNameError, FQDNProxyDenyWithRefused}
// MonitorAggregationFlagsDefault ensures that all TCP flags trigger
// monitor notifications even under medium monitor aggregation (see the
// flag-to-bit sketch after this var block).
MonitorAggregationFlagsDefault = []string{"syn", "fin", "rst"}
)
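// tcpFlagBits is a hypothetical helper (not part of the upstream code) that
// maps the flag names used in MonitorAggregationFlagsDefault above to the
// standard TCP header flag bits. How the agent actually packs these bits into
// the 16-bit, network-byte-order MonitorAggregationFlags field is an
// implementation detail not shown here.
func tcpFlagBits(names []string) uint16 {
	bits := map[string]uint16{
		"fin": 0x01, "syn": 0x02, "rst": 0x04,
		"psh": 0x08, "ack": 0x10, "urg": 0x20,
	}
	var flags uint16
	for _, n := range names {
		flags |= bits[strings.ToLower(n)]
	}
	// For example, tcpFlagBits(MonitorAggregationFlagsDefault) == 0x07 (fin|syn|rst).
	return flags
}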
// Available options for DaemonConfig.RoutingMode
const (
// RoutingModeNative specifies native routing mode
RoutingModeNative = "native"
// RoutingModeTunnel specifies tunneling mode
RoutingModeTunnel = "tunnel"
)
const (
// HTTP403Message specifies the response body for 403 responses, defaults to "Access denied"
HTTP403Message = "http-403-msg"
// ReadCNIConfiguration reads the CNI configuration file and extracts
// Cilium relevant information. This can be used to pass per node
// configuration to Cilium.
ReadCNIConfiguration = "read-cni-conf"
// WriteCNIConfigurationWhenReady writes the CNI configuration to the
// specified location once the agent is ready to serve requests. This
// allows keeping a Kubernetes node NotReady until Cilium is up and
// running and able to schedule endpoints.
WriteCNIConfigurationWhenReady = "write-cni-conf-when-ready"
// CNIExclusive tells the agent to remove other CNI configuration files
CNIExclusive = "cni-exclusive"
// CNIExternalRouting delegates endpoint routing to the chained CNI plugin.
CNIExternalRouting = "cni-external-routing"
// CNILogFile is the path to a log file (on the host) for the CNI plugin
// binary to use for logging.
CNILogFile = "cni-log-file"
// EnableCiliumEndpointSlice enables the cilium endpoint slicing feature.
EnableCiliumEndpointSlice = "enable-cilium-endpoint-slice"
// EnableExternalWorkloads enables the support for external workloads.
EnableExternalWorkloads = "enable-external-workloads"
// EnableSourceIPVerification enables source IP verification; defaults to true
EnableSourceIPVerification = "enable-source-ip-verification"
)
const (
// NodePortMinDefault is the minimum port to listen on for NodePort requests
NodePortMinDefault = 30000
// NodePortMaxDefault is the maximum port to listen on for NodePort requests
NodePortMaxDefault = 32767
// NodePortModeSNAT is for SNATing requests to remote nodes
NodePortModeSNAT = "snat"
// NodePortModeDSR is for performing DSR for requests to remote nodes
NodePortModeDSR = "dsr"
// NodePortModeHybrid is a dual mode of the above, that is, DSR for TCP and SNAT for UDP
NodePortModeHybrid = "hybrid"
// NodePortAlgRandom is for randomly selecting a backend
NodePortAlgRandom = "random"
// NodePortAlgMaglev is for using maglev consistent hashing for backend selection
NodePortAlgMaglev = "maglev"
// DSR dispatch mode to encode service into IP option or extension header
DSRDispatchOption = "opt"
// DSR dispatch mode to encapsulate to IPIP
DSRDispatchIPIP = "ipip"
// DSR dispatch mode to encapsulate to Geneve
DSRDispatchGeneve = "geneve"
// NodePortAccelerationDisabled means we do not accelerate NodePort via XDP
NodePortAccelerationDisabled = XDPModeDisabled
// NodePortAccelerationGeneric means we accelerate NodePort via generic XDP
NodePortAccelerationGeneric = XDPModeGeneric
// NodePortAccelerationNative means we accelerate NodePort via native XDP in the driver (preferred)
NodePortAccelerationNative = XDPModeNative
// NodePortAccelerationBestEffort means we accelerate NodePort via native XDP in the driver (preferred), but will skip devices without driver support
NodePortAccelerationBestEffort = XDPModeBestEffort
// KubeProxyReplacementTrue specifies enabling all kube-proxy replacement
// features (might panic).
KubeProxyReplacementTrue = "true"
// KubeProxyReplacementFalse specifies enabling only selected kube-proxy
// replacement features (might panic).
KubeProxyReplacementFalse = "false"
// KubeProxyReplacement healthz server bind address
KubeProxyReplacementHealthzBindAddr = "kube-proxy-replacement-healthz-bind-address"
// PprofAddressAgent is the default value for pprof in the agent
PprofAddressAgent = "localhost"
// PprofPortAgent is the default value for pprof in the agent
PprofPortAgent = 6060
)
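// xdpLinkModeForAcceleration is a hypothetical sketch (not part of the
// upstream code) relating the NodePortAcceleration values above to the XDP
// link selectors defined earlier in this file: "native" and "best-effort"
// load programs via the driver (xdpdrv), the testing-only generic mode uses
// xdpgeneric, and "disabled" means no XDP attachment at all.
func xdpLinkModeForAcceleration(accel string) string {
	switch accel {
	case NodePortAccelerationNative, NodePortAccelerationBestEffort:
		return XDPModeLinkDriver // "xdpdrv"
	case NodePortAccelerationGeneric:
		return XDPModeLinkGeneric // "xdpgeneric"
	default:
		return XDPModeLinkNone // XDP disabled
	}
}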
// getEnvName returns the environment variable to be used for the given option name.
func getEnvName(option string) string {
under := strings.Replace(option, "-", "_", -1)
upper := strings.ToUpper(under)
return ciliumEnvPrefix + upper
}
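// printEnvNameExamples is a hypothetical illustration (not part of the
// upstream code) of the mapping performed by getEnvName: dashes become
// underscores, the result is upper-cased, and the package-level prefix is
// prepended. Assuming ciliumEnvPrefix is "CILIUM_", --enable-ipv4 would be
// read from CILIUM_ENABLE_IPV4.
func printEnvNameExamples() {
	for _, opt := range []string{EnableIPv4Name, KVstoreLeaseTTL} {
		fmt.Printf("--%s is bound to %s\n", opt, getEnvName(opt))
	}
}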
// BindEnv binds the option name with a deterministic generated environment
// variable which is based on the given optName. If the same optName is bound
// more than once, this function panics.
func BindEnv(vp *viper.Viper, optName string) {
vp.BindEnv(optName, getEnvName(optName))
}
// BindEnvWithLegacyEnvFallback binds the given option name with either the same
// environment variable as BindEnv, if it's set, or with the given legacyEnvName.
//
// The function is used to work around the viper.BindEnv limitation that only
// one environment variable can be bound for an option, and we need multiple
// environment variables for backward-compatibility reasons.
func BindEnvWithLegacyEnvFallback(vp *viper.Viper, optName, legacyEnvName string) {
envName := getEnvName(optName)
if os.Getenv(envName) == "" {
envName = legacyEnvName
}
vp.BindEnv(optName, envName)
}
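// bindOptionsExample is a hypothetical sketch (not part of the upstream code)
// showing how the two binding helpers above are typically combined. The
// legacy variable name "CILIUM_LEGACY_K8S_NAMESPACE" is an assumption used
// purely for illustration.
func bindOptionsExample(vp *viper.Viper) {
	// Standard binding: --enable-ipv6 is read from its generated env var.
	BindEnv(vp, EnableIPv6Name)
	// Fallback binding: if the generated env var for --k8s-namespace is
	// unset, the (assumed) legacy variable is consulted instead.
	BindEnvWithLegacyEnvFallback(vp, K8sNamespaceName, "CILIUM_LEGACY_K8S_NAMESPACE")
}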
// LogRegisteredOptions logs all options that were bound to viper.
func LogRegisteredOptions(vp *viper.Viper, entry *logrus.Entry) {
keys := vp.AllKeys()
slices.Sort(keys)
for _, k := range keys {
ss := vp.GetStringSlice(k)
if len(ss) == 0 {
sm := vp.GetStringMap(k)
for k, v := range sm {
ss = append(ss, fmt.Sprintf("%s=%s", k, v))
}
}
if len(ss) > 0 {
entry.Infof(" --%s='%s'", k, strings.Join(ss, ","))
} else {
entry.Infof(" --%s='%s'", k, vp.GetString(k))
}
}
}
// DaemonConfig is the configuration used by Daemon.
type DaemonConfig struct {
// Private sum of the config written to file. Used to check that the config
// has not changed afterwards.
shaSum [32]byte
CreationTime time.Time
BpfDir string // BPF template files directory
LibDir string // Cilium library files directory
RunDir string // Cilium runtime directory
ExternalEnvoyProxy bool // Whether Envoy is deployed as external DaemonSet or not
LBDevInheritIPAddr string // Device whose IP address is used by the bpf_host devices
EnableXDPPrefilter bool // Enable XDP-based prefiltering
XDPMode string // XDP mode, values: { xdpdrv | xdpgeneric | none }
EnableTCX bool // Enable attaching endpoint programs using tcx if the kernel supports it
HostV4Addr net.IP // Host v4 address of the snooping device
HostV6Addr net.IP // Host v6 address of the snooping device
EncryptInterface []string // Set of network-facing interfaces to encrypt over
EncryptNode bool // Set to true for encrypting node IP traffic
// If set to true the daemon will detect new and deleted datapath devices
// at runtime and reconfigure the datapath to load programs onto the new
// devices.
EnableRuntimeDeviceDetection bool
DatapathMode string // Datapath mode
RoutingMode string // Routing mode
DryMode bool // Do not create BPF maps, devices, ..
// RestoreState enables restoring the state from previous running daemons.
RestoreState bool
KeepConfig bool // Keep configuration of existing endpoints when starting up.
// AllowLocalhost defines when to allow the local stack to reach local endpoints
// values: { auto | always | policy }
AllowLocalhost string
// StateDir is the directory where runtime state of endpoints is stored
StateDir string
// Options changeable at runtime
Opts *IntOptions
// Monitor contains the configuration for the node monitor.
Monitor *models.MonitorStatus
// AgentHealthPort is the TCP port for agent health status API
AgentHealthPort int
// ClusterHealthPort is the TCP port for cluster-wide network connectivity health API
ClusterHealthPort int
// ClusterMeshHealthPort is the TCP port for ClusterMesh apiserver health API
ClusterMeshHealthPort int
// AgentLabels contains additional labels to identify this agent in monitor events.
AgentLabels []string
// IPv6ClusterAllocCIDR is the base CIDR used to allocate IPv6 node
// CIDRs if allocation is not performed by an orchestration system
IPv6ClusterAllocCIDR string
// IPv6ClusterAllocCIDRBase is derived from IPv6ClusterAllocCIDR and
// contains the CIDR without the mask, e.g. "fdfd::1/64" -> "fdfd::"
//
// This variable should never be written to, it is initialized via
// DaemonConfig.Validate()
IPv6ClusterAllocCIDRBase string
// IPv6NAT46x64CIDR is the private base CIDR for the NAT46x64 gateway
IPv6NAT46x64CIDR string
// IPv6NAT46x64CIDRBase is derived from IPv6NAT46x64CIDR and contains
// the IPv6 prefix with the masked bits zeroed out
IPv6NAT46x64CIDRBase netip.Addr
// K8sRequireIPv4PodCIDR requires the k8s node resource to specify the
// IPv4 PodCIDR. Cilium will block bootstrapping until the information
// is available.
K8sRequireIPv4PodCIDR bool
// K8sRequireIPv6PodCIDR requires the k8s node resource to specify the
// IPv6 PodCIDR. Cilium will block bootstrapping until the information
// is available.
K8sRequireIPv6PodCIDR bool
// K8sServiceCacheSize is the service cache size for cilium k8s package.
K8sServiceCacheSize uint
// Number of distinct services to buffer at most.
K8sServiceDebounceBufferSize int
// The amount of time to wait to debounce service events before
// emitting the buffer.
K8sServiceDebounceWaitTime time.Duration
// MTU is the maximum transmission unit of the underlying network
MTU int
// RouteMetric is the metric used for the routes added to the cilium_host device
RouteMetric int
// ClusterName is the name of the cluster
ClusterName string
// ClusterID is the unique identifier of the cluster
ClusterID uint32
// CTMapEntriesGlobalTCP is the maximum number of conntrack entries
// allowed in each TCP CT table for IPv4/IPv6.
CTMapEntriesGlobalTCP int
// CTMapEntriesGlobalAny is the maximum number of conntrack entries
// allowed in each non-TCP CT table for IPv4/IPv6.
CTMapEntriesGlobalAny int
// CTMapEntriesTimeout* values configured by the user.
CTMapEntriesTimeoutTCP time.Duration
CTMapEntriesTimeoutAny time.Duration
CTMapEntriesTimeoutSVCTCP time.Duration
CTMapEntriesTimeoutSVCTCPGrace time.Duration
CTMapEntriesTimeoutSVCAny time.Duration
CTMapEntriesTimeoutSYN time.Duration
CTMapEntriesTimeoutFIN time.Duration
// MaxInternalTimerDelay sets a maximum on all periodic timers in
// the agent in order to flush out timer-related bugs in the agent.
MaxInternalTimerDelay time.Duration
// MonitorAggregationInterval configures the interval between monitor
// messages when monitor aggregation is enabled.
MonitorAggregationInterval time.Duration
// MonitorAggregationFlags determines which TCP flags the monitor
// aggregation ensures reports are generated for when monitor aggregation
// is enabled. Network byte-order.
MonitorAggregationFlags uint16
// BPFEventsDefaultRateLimit specifies limit of messages per second that can be written to
// BPF events map. This limit is defined for all types of events except dbg and pcap.
// The number of messages is averaged, meaning that if no messages were written
// to the map over 5 seconds, it's possible to write more events than the value of rate limit
// in the 6th second.
//
// If BPFEventsDefaultRateLimit > 0, non-zero value for BPFEventsDefaultBurstLimit must also be provided
// lest the configuration is considered invalid.
BPFEventsDefaultRateLimit uint32
// BPFEventsDefaultBurstLimit specifies the maximum number of messages that can be written
// to BPF events map in 1 second. This limit is defined for all types of events except dbg and pcap.
//
// If BPFEventsDefaultBurstLimit > 0, non-zero value for BPFEventsDefaultRateLimit must also be provided
// lest the configuration is considered invalid.
// If both burst and rate limit are 0 or not specified, no limit is imposed.
BPFEventsDefaultBurstLimit uint32
// BPFMapsDynamicSizeRatio is ratio of total system memory to use for
// dynamic sizing of the CT, NAT, Neighbor and SockRevNAT BPF maps.
BPFMapsDynamicSizeRatio float64
// NATMapEntriesGlobal is the maximum number of NAT mappings allowed
// in the BPF NAT table
NATMapEntriesGlobal int
// NeighMapEntriesGlobal is the maximum number of neighbor mappings
// allowed in the BPF neigh table
NeighMapEntriesGlobal int
// AuthMapEntries is the maximum number of entries in the auth map.
AuthMapEntries int
// PolicyMapEntries is the maximum number of peer identities that an
// endpoint may be allowed to exchange traffic with.
PolicyMapEntries int
// PolicyMapFullReconciliationInterval is the interval at which to perform
// the full reconciliation of the endpoint policy map.
PolicyMapFullReconciliationInterval time.Duration
// SockRevNatEntries is the maximum number of sock rev nat mappings
// allowed in the BPF rev nat table
SockRevNatEntries int
// DisableCiliumEndpointCRD disables the use of CiliumEndpoint CRD
DisableCiliumEndpointCRD bool
// MaxControllerInterval is the maximum value for a controller's
// RunInterval. Zero means unlimited.
MaxControllerInterval int
// HTTP403Message is the error message to return when an HTTP 403 is returned
// by the proxy, if L7 policy is configured.
HTTP403Message string
ProcFs string
// K8sNamespace is the name of the namespace in which Cilium is
// deployed in when running in Kubernetes mode
K8sNamespace string
// AgentNotReadyNodeTaintKey is a node taint which prevents pods from being
// scheduled. Once Cilium is set up, it is removed from the node. Mostly
// used in cloud providers to prevent existing CNI plugins from managing
// pods.
AgentNotReadyNodeTaintKey string
// JoinCluster is 'true' if the agent should join a Cilium cluster via kvstore
// registration
JoinCluster bool
// EnableIPv4 is true when IPv4 is enabled
EnableIPv4 bool
// EnableIPv6 is true when IPv6 is enabled
EnableIPv6 bool
// EnableNat46X64Gateway is true when L3-based NAT46 and NAT64 translation is enabled
EnableNat46X64Gateway bool
// EnableIPv6NDP is true when NDP is enabled for IPv6
EnableIPv6NDP bool
// EnableSRv6 is true when SRv6 encapsulation support is enabled
EnableSRv6 bool
// SRv6EncapMode is the encapsulation mode for SRv6
SRv6EncapMode string
// EnableSCTP is true when SCTP support is enabled.
EnableSCTP bool
// IPv6MCastDevice is the name of the device that joins the IPv6 solicitation multicast group
IPv6MCastDevice string
// EnableL7Proxy is the option to enable L7 proxy
EnableL7Proxy bool
// EnableIPSec is true when IPSec is enabled
EnableIPSec bool
// IPSec key file for stored keys
IPSecKeyFile string
// Duration of the IPsec key rotation. After that time, we will clean the
// previous IPsec key from the node.
IPsecKeyRotationDuration time.Duration
// Enable watcher for IPsec key. If disabled, a restart of the agent will
// be necessary on key rotations.
EnableIPsecKeyWatcher bool
// EnableIPSecXfrmStateCaching enables IPSec XfrmState caching.
EnableIPSecXfrmStateCaching bool
// EnableIPSecEncryptedOverlay enables IPSec encryption for overlay traffic.
EnableIPSecEncryptedOverlay bool
// BootIDFile is the file containing the boot ID of the node
BootIDFile string
// EnableWireguard enables Wireguard encryption
EnableWireguard bool
// EnableEncryptionStrictMode enables strict mode for encryption
EnableEncryptionStrictMode bool
// WireguardTrackAllIPsFallback forces the WireGuard agent to track all IPs.
WireguardTrackAllIPsFallback bool
// EncryptionStrictModeCIDR is the CIDR to use for strict mode
EncryptionStrictModeCIDR netip.Prefix
// EncryptionStrictModeAllowRemoteNodeIdentities allows dynamic lookup of node identities.
// This is required when tunneling is used
// or direct routing is used and the node CIDR and pod CIDR overlap.
EncryptionStrictModeAllowRemoteNodeIdentities bool
// WireguardPersistentKeepalive controls Wireguard PersistentKeepalive option.
WireguardPersistentKeepalive time.Duration
// EnableL2Announcements enables L2 announcement of service IPs
EnableL2Announcements bool
// L2AnnouncerLeaseDuration is the lease duration: if a lease has not been renewed for this amount of time, a new leader can be chosen.
L2AnnouncerLeaseDuration time.Duration
// L2AnnouncerRenewDeadline is the interval at which the leader renews the lease.
L2AnnouncerRenewDeadline time.Duration
// L2AnnouncerRetryPeriod is the amount of time to wait before retrying after a renew failure.
L2AnnouncerRetryPeriod time.Duration
// NodeEncryptionOptOutLabels contains the label selectors for nodes opting out of
// node-to-node encryption
// This field is ignored when marshalling to JSON in DaemonConfig.StoreInFile,
// because a k8sLabels.Selector cannot be unmarshalled from JSON. The
// string is stored in NodeEncryptionOptOutLabelsString instead.
NodeEncryptionOptOutLabels k8sLabels.Selector `json:"-"`
// NodeEncryptionOptOutLabelsString is the string used to construct
// the label selector in the above field (see the sketch after this struct).
NodeEncryptionOptOutLabelsString string
// CLI options
BPFRoot string
BPFSocketLBHostnsOnly bool
CGroupRoot string
BPFCompileDebug string
CompilerFlags []string
ConfigFile string
ConfigDir string
Debug bool
DebugVerbose []string
EnableSocketLB bool
EnableSocketLBTracing bool
EnableSocketLBPeer bool
EnablePolicy string
EnableTracing bool
EnableIPIPTermination bool
EnableUnreachableRoutes bool
FixedIdentityMapping map[string]string
FixedIdentityMappingValidator func(val string) (string, error) `json:"-"`
FixedZoneMapping map[string]uint8
ReverseFixedZoneMapping map[uint8]string
FixedZoneMappingValidator func(val string) (string, error) `json:"-"`
IPv4Range string
IPv6Range string
IPv4ServiceRange string
IPv6ServiceRange string
K8sSyncTimeout time.Duration
AllocatorListTimeout time.Duration
K8sWatcherEndpointSelector string
KVStore string
KVStoreOpt map[string]string
LabelPrefixFile string
Labels []string
LogDriver []string
LogOpt map[string]string
LogSystemLoadConfig bool
// Masquerade specifies whether or not to masquerade packets from endpoints
// leaving the host.
EnableIPv4Masquerade bool
EnableIPv6Masquerade bool
EnableBPFMasquerade bool
EnableMasqueradeRouteSource bool
EnableIPMasqAgent bool
IPMasqAgentConfigPath string
EnableBPFClockProbe bool
EnableIPv4EgressGateway bool
EnableEnvoyConfig bool
InstallIptRules bool
MonitorAggregation string
PreAllocateMaps bool
IPv6NodeAddr string
IPv4NodeAddr string
SocketPath string
TracePayloadlen int
Version string
PrometheusServeAddr string
ToFQDNsMinTTL int
// DNSMaxIPsPerRestoredRule defines the maximum number of IPs to maintain
// for each FQDN selector in an endpoint's restored DNS rules
DNSMaxIPsPerRestoredRule int
// DNSPolicyUnloadOnShutdown defines whether DNS policy rules should be unloaded on
// graceful shutdown.
DNSPolicyUnloadOnShutdown bool
// ToFQDNsProxyPort is the user-configured global, shared, DNS listen port used
// by the DNS Proxy. Both UDP and TCP are handled on the same port. When it
// is 0 a random port will be assigned, and can be obtained from
// DefaultDNSProxy below.
ToFQDNsProxyPort int
// ToFQDNsMaxIPsPerHost defines the maximum number of IPs to maintain
// for each FQDN name in an endpoint's FQDN cache
ToFQDNsMaxIPsPerHost int
// ToFQDNsMaxDeferredConnectionDeletes defines the maximum number of IPs to retain for
// expired DNS lookups with still-active connections
ToFQDNsMaxDeferredConnectionDeletes int
// ToFQDNsIdleConnectionGracePeriod is the time during which idle but
// previously active connections with expired DNS lookups are
// still considered alive
ToFQDNsIdleConnectionGracePeriod time.Duration
// FQDNRejectResponse is the dns-proxy response for invalid dns-proxy request
FQDNRejectResponse string
// FQDNProxyResponseMaxDelay is the maximum time the DNS proxy holds an allowed
// DNS response before sending it along. Responses are sent as soon as the
// datapath is updated with the new IP information.
FQDNProxyResponseMaxDelay time.Duration
// FQDNRegexCompileLRUSize is the size of the FQDN regex compilation LRU.
// Useful for heavy but repeated FQDN MatchName or MatchPattern use.
FQDNRegexCompileLRUSize int
// Path to a file with DNS cache data to preload on startup
ToFQDNsPreCache string
// ToFQDNsEnableDNSCompression allows the DNS proxy to compress responses to
// endpoints that are larger than 512 Bytes or the EDNS0 option, if present.
ToFQDNsEnableDNSCompression bool
// DNSProxyConcurrencyLimit limits parallel processing of DNS messages in
// DNS proxy at any given point in time.
DNSProxyConcurrencyLimit int
// DNSProxyConcurrencyProcessingGracePeriod is the amount of grace time to
// wait while processing DNS messages when the DNSProxyConcurrencyLimit has
// been reached.
DNSProxyConcurrencyProcessingGracePeriod time.Duration
// DNSProxyEnableTransparentMode enables transparent mode for the DNS proxy.
DNSProxyEnableTransparentMode bool
// DNSProxyInsecureSkipTransparentModeCheck is a hidden flag that allows users
// to disable transparent mode even if IPSec is enabled
DNSProxyInsecureSkipTransparentModeCheck bool
// DNSProxyLockCount is the size of the array of mutexes which protect
// against parallel handling of DNS response names.
DNSProxyLockCount int
// DNSProxyLockTimeout is timeout when acquiring the locks controlled by
// DNSProxyLockCount.
DNSProxyLockTimeout time.Duration
// DNSProxySocketLingerTimeout defines how many seconds we wait for the connection
// between the DNS proxy and the upstream server to be closed.
DNSProxySocketLingerTimeout int
// EnableBPFTProxy enables implementing proxy redirection via BPF
// mechanisms rather than iptables rules.
EnableBPFTProxy bool
// EnableAutoDirectRouting enables installation of direct routes to
// other nodes when available
EnableAutoDirectRouting bool
// DirectRoutingSkipUnreachable skips installation of direct routes
// to nodes when they're not on the same L2
DirectRoutingSkipUnreachable bool
// EnableLocalNodeRoute controls installation of the route which points
// to the allocation prefix of the local node.
EnableLocalNodeRoute bool
// EnableHealthChecking enables health checking between nodes and
// health endpoints
EnableHealthChecking bool
// EnableEndpointHealthChecking enables health checking between virtual
// health endpoints
EnableEndpointHealthChecking bool
// EnableHealthCheckNodePort enables health checking of NodePort by
// cilium
EnableHealthCheckNodePort bool
// EnableHealthCheckLoadBalancerIP enables health checking of LoadBalancerIP
// by cilium
EnableHealthCheckLoadBalancerIP bool
// HealthCheckICMPFailureThreshold is the number of ICMP packets sent for each health
// checking run. If at least one ICMP response is received, the node or endpoint
// is marked as healthy.
HealthCheckICMPFailureThreshold int
// KVstoreLeaseTTL is the time-to-live for kvstore lease.
KVstoreLeaseTTL time.Duration
// KVstoreMaxConsecutiveQuorumErrors is the maximum number of acceptable
// kvstore consecutive quorum errors before the agent assumes permanent failure
KVstoreMaxConsecutiveQuorumErrors uint
// KVstorePeriodicSync is the time interval in which periodic
// synchronization with the kvstore occurs
KVstorePeriodicSync time.Duration
// KVstoreConnectivityTimeout is the timeout when performing kvstore operations
KVstoreConnectivityTimeout time.Duration
// KVstorePodNetworkSupport enables the support for running the Cilium KVstore
// in pod network.
KVstorePodNetworkSupport bool
// IdentityChangeGracePeriod is the grace period that needs to pass
// before an endpoint that has changed its identity will start using
// that new identity. During the grace period, the new identity has
// already been allocated and other nodes in the cluster have a chance
// to whitelist the new upcoming identity of the endpoint.
IdentityChangeGracePeriod time.Duration
// IdentityRestoreGracePeriod is the grace period that needs to pass before CIDR identities
// restored during agent restart are released. If any of the restored identities remains
// unused after this time, they will be removed from the IP cache. Any of the restored
// identities that are used in network policies will remain in the IP cache until all such
// policies are removed.
//
// The default is 30 seconds for k8s clusters, and 10 minutes for kvstore clusters
IdentityRestoreGracePeriod time.Duration
// EndpointQueueSize is the size of the EventQueue per-endpoint. A larger
// queue means that more events can be buffered per-endpoint. This is useful
// in the case where a cluster might be under high load for endpoint-related
// events, specifically those which cause many regenerations.
EndpointQueueSize int
// ConntrackGCInterval is the connection tracking garbage collection
// interval
ConntrackGCInterval time.Duration
// ConntrackGCMaxInterval if set limits the automatic GC interval calculation to
// the specified maximum value.
ConntrackGCMaxInterval time.Duration
// LoopbackIPv4 is the address to use for service loopback SNAT
LoopbackIPv4 string
// LocalRouterIPv4 is the link-local IPv4 address used for Cilium's router device
LocalRouterIPv4 string
// LocalRouterIPv6 is the link-local IPv6 address used for Cilium's router device
LocalRouterIPv6 string
// EnableEndpointRoutes enables use of per endpoint routes
EnableEndpointRoutes bool
// Specifies whether to annotate the Kubernetes nodes or not
AnnotateK8sNode bool
// EnableNodePort enables k8s NodePort service implementation in BPF
EnableNodePort bool
// EnableSVCSourceRangeCheck enables check of loadBalancerSourceRanges
EnableSVCSourceRangeCheck bool
// EnableHealthDatapath enables IPIP health probes data path
EnableHealthDatapath bool
// EnableHostPort enables k8s Pod's hostPort mapping through BPF
EnableHostPort bool
// EnableHostLegacyRouting enables the old routing path via stack.
EnableHostLegacyRouting bool
// NodePortNat46X64 indicates whether NAT46 / NAT64 can be used.
NodePortNat46X64 bool
// NodePortMode indicates in which mode NodePort implementation should run
// ("snat", "dsr" or "hybrid")
NodePortMode string
// LoadBalancerModeAnnotation tells whether the controller should check the service-level
// annotation for configuring the BPF load balancing mode.
LoadBalancerModeAnnotation bool
// NodePortAlg indicates which backend selection algorithm is used
// ("random" or "maglev")
NodePortAlg string
// LoadBalancerAlgorithmAnnotation tells whether the controller should check the service-level
// annotation for configuring the BPF load balancing algorithm.
LoadBalancerAlgorithmAnnotation bool
// LoadBalancerDSRDispatch indicates the method for pushing packets to
// backends under DSR ("opt" or "ipip")
LoadBalancerDSRDispatch string
// LoadBalancerRSSv4CIDR defines the outer source IPv4 prefix for DSR/IPIP
LoadBalancerRSSv4CIDR string
LoadBalancerRSSv4 net.IPNet
// LoadBalancerRSSv6CIDR defines the outer source IPv6 prefix for DSR/IPIP
LoadBalancerRSSv6CIDR string
LoadBalancerRSSv6 net.IPNet
// LoadBalancerExternalControlPlane tells whether not to use kube-apiserver as
// the control plane in lb-only mode.
LoadBalancerExternalControlPlane bool
// LoadBalancerProtocolDifferentiation enables support for service protocol differentiation (TCP, UDP, SCTP)
LoadBalancerProtocolDifferentiation bool
// EnablePMTUDiscovery indicates whether to send ICMP fragmentation-needed
// replies to the client (when needed).
EnablePMTUDiscovery bool
// NodePortAcceleration indicates whether NodePort should be accelerated
// via XDP ("none", "generic", "native", or "best-effort")
NodePortAcceleration string
// NodePortBindProtection rejects bind requests to NodePort service ports
NodePortBindProtection bool
// EnableAutoProtectNodePortRange enables appending NodePort range to
// net.ipv4.ip_local_reserved_ports if it overlaps with ephemeral port
// range (net.ipv4.ip_local_port_range)
EnableAutoProtectNodePortRange bool
// KubeProxyReplacement controls how to enable kube-proxy replacement
// features in BPF datapath
KubeProxyReplacement string
// AddressScopeMax controls the maximum address scope for addresses to be
// considered local ones with HOST_ID in the ipcache
AddressScopeMax int
// EnableRecorder enables the datapath pcap recorder
EnableRecorder bool
// EnableMKE enables MKE specific 'chaining' for kube-proxy replacement
EnableMKE bool
// CgroupPathMKE points to the cgroupv1 net_cls mount instance
CgroupPathMKE string
// KubeProxyReplacementHealthzBindAddr is the KubeProxyReplacement healthz server bind addr
KubeProxyReplacementHealthzBindAddr string
// EnableExternalIPs enables implementation of k8s services with externalIPs in datapath
EnableExternalIPs bool
// EnableHostFirewall enables network policies for the host
EnableHostFirewall bool
// EnableLocalRedirectPolicy enables redirect policies to redirect traffic within nodes
EnableLocalRedirectPolicy bool
// NodePortMin is the minimum port address for the NodePort range
NodePortMin int
// NodePortMax is the maximum port address for the NodePort range
NodePortMax int
// EnableSessionAffinity enables a support for service sessionAffinity
EnableSessionAffinity bool
// Selection of BPF main clock source (ktime vs jiffies)
ClockSource BPFClockSource
// EnableIdentityMark enables setting the mark field with the identity for
// local traffic. This may be disabled if chaining modes and Cilium use
// conflicting marks.
EnableIdentityMark bool
// KernelHz is the HZ rate the kernel is operating in
KernelHz int
// ExcludeLocalAddresses excludes certain addresses from being recognized as
// local addresses
ExcludeLocalAddresses []*net.IPNet
// IPv4PodSubnets are the available subnets from which IPv4 addresses can be assigned to pods
IPv4PodSubnets []*net.IPNet
// IPv6PodSubnets are the available subnets from which IPv6 addresses can be assigned to pods
IPv6PodSubnets []*net.IPNet
// IPAM is the IPAM method to use
IPAM string
// IPAMMultiPoolPreAllocation defines the pre-allocation value for each IPAM pool
IPAMMultiPoolPreAllocation map[string]string
// IPAMDefaultIPPool the default IP Pool when using multi-pool
IPAMDefaultIPPool string
// AutoCreateCiliumNodeResource enables automatic creation of a
// CiliumNode resource for the local node
AutoCreateCiliumNodeResource bool
// ExcludeNodeLabelPatterns allows for excluding unnecessary labels from being propagated from k8s node to cilium
// node object. This allows for avoiding unnecessary events being broadcast to all nodes in the cluster.
ExcludeNodeLabelPatterns []*regexp.Regexp
// IPv4NativeRoutingCIDR describes a CIDR in which pod IPs are routable
IPv4NativeRoutingCIDR *cidr.CIDR
// IPv6NativeRoutingCIDR describes a CIDR in which pod IPs are routable
IPv6NativeRoutingCIDR *cidr.CIDR
// MasqueradeInterfaces is the selector used to select interfaces subject
// to egress masquerading.
MasqueradeInterfaces []string
// PolicyTriggerInterval is the amount of time between when policy updates
// are triggered.
PolicyTriggerInterval time.Duration
// IdentityAllocationMode specifies what mode to use for identity
// allocation
IdentityAllocationMode string
// AllowICMPFragNeeded allows ICMP Fragmentation Needed type packets in
// the network policy for cilium-agent.
AllowICMPFragNeeded bool
// Azure options
// PolicyAuditMode enables non-drop mode for installed policies. In
// audit mode packets affected by policies will not be dropped.
// Policy related decisions can be checked via the policy verdict messages.
PolicyAuditMode bool
// PolicyAccounting enables policy accounting
PolicyAccounting bool
// EnableIPv4FragmentsTracking enables IPv4 fragments tracking for
// L4-based lookups. Needs LRU map support.
EnableIPv4FragmentsTracking bool
// FragmentsMapEntries is the maximum number of fragmented datagrams
// that can simultaneously be tracked in order to retrieve their L4
// ports for all fragments.
FragmentsMapEntries int
// SizeofCTElement is the size of an element (key + value) in the CT map.
SizeofCTElement int
// SizeofNATElement is the size of an element (key + value) in the NAT map.
SizeofNATElement int
// SizeofNeighElement is the size of an element (key + value) in the neigh
// map.
SizeofNeighElement int
// SizeofSockRevElement is the size of an element (key + value) in the sock
// rev NAT map.
SizeofSockRevElement int
// K8sEnableLeasesFallbackDiscovery enables k8s to fall back to API probing to check
// for the support of Leases in Kubernetes when there is an error in discovering
// API groups using the Discovery API.
// We only need to check for Leases capabilities in the operator, which uses Leases
// for leader election purposes in HA mode.
// This is only enabled for cilium-operator.
K8sEnableLeasesFallbackDiscovery bool
// LBMapEntries is the maximum number of entries allowed in BPF lbmap.
LBMapEntries int
// LBServiceMapEntries is the maximum number of entries allowed in BPF lbmap for services.
LBServiceMapEntries int
// LBBackendMapEntries is the maximum number of entries allowed in BPF lbmap for service backends.
LBBackendMapEntries int
// LBRevNatEntries is the maximum number of entries allowed in BPF lbmap for reverse NAT.
LBRevNatEntries int
// LBAffinityMapEntries is the maximum number of entries allowed in BPF lbmap for session affinities.
LBAffinityMapEntries int
// LBSourceRangeAllTypes enables propagation of loadbalancerSourceRanges to all Kubernetes
// service types which were created from the LoadBalancer service.
LBSourceRangeAllTypes bool
// LBSourceRangeMapEntries is the maximum number of entries allowed in BPF lbmap for source ranges.
LBSourceRangeMapEntries int
// LBMaglevMapEntries is the maximum number of entries allowed in BPF lbmap for maglev.
LBMaglevMapEntries int
// CRDWaitTimeout is the timeout in which Cilium will exit if CRDs are not
// available.
CRDWaitTimeout time.Duration
// EgressMultiHomeIPRuleCompat instructs Cilium, if false, to use a new scheme
// to store rules and routes under ENI and Azure IPAM modes.
// Otherwise, it will use the old scheme.
EgressMultiHomeIPRuleCompat bool
// InstallUplinkRoutesForDelegatedIPAM installs ingress/egress routes through the uplink
// on the host for Pods when working with a delegated IPAM plugin.
InstallUplinkRoutesForDelegatedIPAM bool
// InstallNoConntrackIptRules instructs Cilium to install Iptables rules to skip netfilter connection tracking on all pod traffic.
InstallNoConntrackIptRules bool
// ContainerIPLocalReservedPorts instructs the Cilium CNI plugin to reserve
// the provided comma-separated list of ports in the container network namespace
ContainerIPLocalReservedPorts string
// EnableCustomCalls enables tail call hooks for user-defined custom
// eBPF programs, typically used to collect custom per-endpoint
// metrics.
EnableCustomCalls bool
// BGPSecretsNamespace is the Kubernetes namespace to get BGP control plane secrets from.
BGPSecretsNamespace string
// ExternalClusterIP enables routing to ClusterIP services from outside
// the cluster. This mirrors the behaviour of kube-proxy.
ExternalClusterIP bool
// ARPPingRefreshPeriod is the ARP entries refresher period.
ARPPingRefreshPeriod time.Duration
// EnableCiliumEndpointSlice enables the cilium endpoint slicing feature.
EnableCiliumEndpointSlice bool
// ARPPingKernelManaged denotes whether kernel can auto-refresh Neighbor entries
ARPPingKernelManaged bool
// VLANBPFBypass is the list of explicitly allowed VLAN IDs for BPF logic bypass
VLANBPFBypass []int
// DisableExternalIPMitigation disables the externalIP mitigation (CVE-2020-8554)
DisableExternalIPMitigation bool
// EnableL2NeighDiscovery determines if cilium should perform L2 neighbor
// discovery.
EnableL2NeighDiscovery bool
// EnableICMPRules enables ICMP-based rule support for Cilium Network Policies.
EnableICMPRules bool
// Use the CiliumInternalIPs (vs. NodeInternalIPs) for IPsec encapsulation.
UseCiliumInternalIPForIPsec bool
// BypassIPAvailabilityUponRestore bypasses the IP availability error
// within IPAM upon endpoint restore and allows the use of the restored IP
// regardless of whether it's available in the pool.
BypassIPAvailabilityUponRestore bool
// EnableK8sTerminatingEndpoint enables auto-detect of terminating state for
// Kubernetes service endpoints.
EnableK8sTerminatingEndpoint bool
// EnableVTEP enables Cilium VXLAN VTEP integration
EnableVTEP bool
// VtepEndpoints VTEP endpoint IPs
VtepEndpoints []net.IP
// VtepCIDRs VTEP CIDRs
VtepCIDRs []*cidr.CIDR
// VtepMask VTEP Mask
VtepCidrMask net.IP
// VtepMACs VTEP MACs
VtepMACs []mac.MAC
// TCFilterPriority sets the priority of the cilium tc filter, enabling other
// filters to be inserted prior to the cilium filter.
TCFilterPriority uint16
// Enables BGP control plane features.
EnableBGPControlPlane bool
// Enables BGP control plane status reporting.
EnableBGPControlPlaneStatusReport bool
// BGPRouterIDAllocationMode is the mode to allocate the BGP router-id in ipv6 standalone environment.
BGPRouterIDAllocationMode string
// BPFMapEventBuffers configures which BPF map event buffers to enable
// and the configuration options for those.
BPFMapEventBuffers map[string]string
BPFMapEventBuffersValidator func(val string) (string, error) `json:"-"`
bpfMapEventConfigs BPFEventBufferConfigs
// BPFEventsDropEnabled controls whether the Cilium datapath exposes "drop" events to Cilium monitor and Hubble.
BPFEventsDropEnabled bool
// BPFEventsPolicyVerdictEnabled controls whether the Cilium datapath exposes "policy verdict" events to Cilium monitor and Hubble.
BPFEventsPolicyVerdictEnabled bool
// BPFEventsTraceEnabled controls whether the Cilium datapath exposes "trace" events to Cilium monitor and Hubble.
BPFEventsTraceEnabled bool
// BPFConntrackAccounting controls whether CT accounting for packets and bytes is enabled.
BPFConntrackAccounting bool
// IPAMCiliumNodeUpdateRate is the maximum rate at which the CiliumNode custom
// resource is updated.
IPAMCiliumNodeUpdateRate time.Duration
// EnableK8sNetworkPolicy enables support for K8s NetworkPolicy.
EnableK8sNetworkPolicy bool
// EnableCiliumNetworkPolicy enables support for Cilium Network Policy.
EnableCiliumNetworkPolicy bool
// EnableCiliumClusterwideNetworkPolicy enables support for Cilium Clusterwide
// Network Policy.
EnableCiliumClusterwideNetworkPolicy bool
// PolicyCIDRMatchMode is the list of entities that can be selected by CIDR policy.
// Currently supported values:
// - world
// - world, remote-node
PolicyCIDRMatchMode []string
// MaxConnectedClusters sets the maximum number of clusters that can be
// connected in a clustermesh.
// The value is used to determine the bit allocation for cluster ID and
// identity in a numeric identity. Values > 255 will decrease the number of
// allocatable identities.
MaxConnectedClusters uint32
// ForceDeviceRequired enforces the attachment of BPF programs on native device.
ForceDeviceRequired bool
// ServiceNoBackendResponse determines how we handle traffic to a service with no backends.
ServiceNoBackendResponse string
// EnableNodeSelectorLabels enables use of the node label based identity
EnableNodeSelectorLabels bool
// NodeLabels is the list of label prefixes used to determine identity of a node (requires enabling of
// EnableNodeSelectorLabels)
NodeLabels []string
// EnableSocketLBPodConnectionTermination enables the termination of connections from pods
// to deleted service backends when socket-LB is enabled
EnableSocketLBPodConnectionTermination bool
// EnableInternalTrafficPolicy enables handling routing for services with internalTrafficPolicy configured
EnableInternalTrafficPolicy bool
// EnableNonDefaultDenyPolicies allows policies to define whether they are operating in default-deny mode
EnableNonDefaultDenyPolicies bool
// EnableSourceIPVerification enables source IP verification of connections from endpoints to endpoints
EnableSourceIPVerification bool
// EnableEndpointLockdownOnPolicyOverflow enables endpoint lockdown when an endpoint's
// policy map overflows.
EnableEndpointLockdownOnPolicyOverflow bool
}
var (
// Config represents the daemon configuration
Config = &DaemonConfig{
CreationTime: time.Now(),
Opts: NewIntOptions(&DaemonOptionLibrary),
Monitor: &models.MonitorStatus{Cpus: int64(runtime.NumCPU()), Npages: 64, Pagesize: int64(os.Getpagesize()), Lost: 0, Unknown: 0},
IPv6ClusterAllocCIDR: defaults.IPv6ClusterAllocCIDR,
IPv6ClusterAllocCIDRBase: defaults.IPv6ClusterAllocCIDRBase,
IPAMDefaultIPPool: defaults.IPAMDefaultIPPool,
EnableHealthChecking: defaults.EnableHealthChecking,
EnableEndpointHealthChecking: defaults.EnableEndpointHealthChecking,
EnableHealthCheckLoadBalancerIP: defaults.EnableHealthCheckLoadBalancerIP,
EnableHealthCheckNodePort: defaults.EnableHealthCheckNodePort,
HealthCheckICMPFailureThreshold: defaults.HealthCheckICMPFailureThreshold,
EnableIPv4: defaults.EnableIPv4,
EnableIPv6: defaults.EnableIPv6,
EnableIPv6NDP: defaults.EnableIPv6NDP,
EnableSCTP: defaults.EnableSCTP,
EnableL7Proxy: defaults.EnableL7Proxy,
DNSMaxIPsPerRestoredRule: defaults.DNSMaxIPsPerRestoredRule,
ToFQDNsMaxIPsPerHost: defaults.ToFQDNsMaxIPsPerHost,
KVstorePeriodicSync: defaults.KVstorePeriodicSync,
KVstoreConnectivityTimeout: defaults.KVstoreConnectivityTimeout,
KVstorePodNetworkSupport: defaults.KVstorePodNetworkSupport,
IdentityChangeGracePeriod: defaults.IdentityChangeGracePeriod,
IdentityRestoreGracePeriod: defaults.IdentityRestoreGracePeriodK8s,
FixedIdentityMapping: make(map[string]string),
KVStoreOpt: make(map[string]string),
LogOpt: make(map[string]string),
LoopbackIPv4: defaults.LoopbackIPv4,
EnableEndpointRoutes: defaults.EnableEndpointRoutes,
AnnotateK8sNode: defaults.AnnotateK8sNode,
K8sServiceCacheSize: defaults.K8sServiceCacheSize,
AutoCreateCiliumNodeResource: defaults.AutoCreateCiliumNodeResource,
IdentityAllocationMode: IdentityAllocationModeKVstore,
AllowICMPFragNeeded: defaults.AllowICMPFragNeeded,
AllocatorListTimeout: defaults.AllocatorListTimeout,
EnableICMPRules: defaults.EnableICMPRules,
UseCiliumInternalIPForIPsec: defaults.UseCiliumInternalIPForIPsec,
K8sEnableLeasesFallbackDiscovery: defaults.K8sEnableLeasesFallbackDiscovery,
ExternalClusterIP: defaults.ExternalClusterIP,
EnableVTEP: defaults.EnableVTEP,
EnableBGPControlPlane: defaults.EnableBGPControlPlane,
EnableK8sNetworkPolicy: defaults.EnableK8sNetworkPolicy,
EnableCiliumNetworkPolicy: defaults.EnableCiliumNetworkPolicy,
EnableCiliumClusterwideNetworkPolicy: defaults.EnableCiliumClusterwideNetworkPolicy,
PolicyCIDRMatchMode: defaults.PolicyCIDRMatchMode,
MaxConnectedClusters: defaults.MaxConnectedClusters,
BPFEventsDropEnabled: defaults.BPFEventsDropEnabled,
BPFEventsPolicyVerdictEnabled: defaults.BPFEventsPolicyVerdictEnabled,
BPFEventsTraceEnabled: defaults.BPFEventsTraceEnabled,
BPFConntrackAccounting: defaults.BPFConntrackAccounting,
EnableEnvoyConfig: defaults.EnableEnvoyConfig,
EnableInternalTrafficPolicy: defaults.EnableInternalTrafficPolicy,
EnableNonDefaultDenyPolicies: defaults.EnableNonDefaultDenyPolicies,
EnableSourceIPVerification: defaults.EnableSourceIPVerification,
}
)
// IsExcludedLocalAddress returns true if the specified IP matches one of the
// excluded local IP ranges
func (c *DaemonConfig) IsExcludedLocalAddress(ip net.IP) bool {
for _, ipnet := range c.ExcludeLocalAddresses {
if ipnet.Contains(ip) {
return true
}
}
return false
}
// IsPodSubnetsDefined returns true if encryption subnets should be configured at init time.
func (c *DaemonConfig) IsPodSubnetsDefined() bool {
return len(c.IPv4PodSubnets) > 0 || len(c.IPv6PodSubnets) > 0
}
// nodeConfigFile is the name of the C header which contains the node's
// network parameters.
const nodeConfigFile = "node_config.h"
// GetNodeConfigPath returns the full path of the nodeConfigFile.
func (c *DaemonConfig) GetNodeConfigPath() string {
return filepath.Join(c.GetGlobalsDir(), nodeConfigFile)
}
// GetGlobalsDir returns the path for the globals directory.
func (c *DaemonConfig) GetGlobalsDir() string {
return filepath.Join(c.StateDir, "globals")
}
// AlwaysAllowLocalhost returns true if the daemon is configured to always
// allow localhost to reach local endpoints
func (c *DaemonConfig) AlwaysAllowLocalhost() bool {
switch c.AllowLocalhost {
case AllowLocalhostAlways:
return true
case AllowLocalhostAuto, AllowLocalhostPolicy:
return false
default:
return false
}
}
// TunnelingEnabled returns true if tunneling is enabled.
func (c *DaemonConfig) TunnelingEnabled() bool {
// We check if routing mode is not native rather than checking if it's
// tunneling because, in unit tests, RoutingMode is usually not set and we
// would like for TunnelingEnabled to default to the actual default
// (tunneling is enabled) in that case.
return c.RoutingMode != RoutingModeNative
}
// AreDevicesRequired returns true if the agent needs to attach to the native
// devices to implement some features.
func (c *DaemonConfig) AreDevicesRequired() bool {
return c.EnableNodePort || c.EnableHostFirewall || c.EnableWireguard ||
c.EnableL2Announcements || c.ForceDeviceRequired || c.EnableIPSecEncryptedOverlay
}
// NeedBPFHostOnWireGuardDevice returns true if the agent needs to attach
// a BPF program on the Ingress of Cilium's WireGuard device
func (c *DaemonConfig) NeedBPFHostOnWireGuardDevice() bool {
if !c.EnableWireguard {
return false
}
// In native routing mode we want to deliver packets to local endpoints
// straight from BPF, without passing through the stack.
// This matches overlay mode (where bpf_overlay would handle the delivery)
// and native routing mode without encryption (where bpf_host at the native
// device would handle the delivery).
if !c.TunnelingEnabled() {
return true
}
// When WG & encrypt-node are on, a NodePort BPF to-be forwarded request
// to a remote node running a selected service endpoint must be encrypted.
// To make the NodePort's rev-{S,D}NAT translations to happen for a reply
// from the remote node, we need to attach bpf_host to the Cilium's WG
// netdev (otherwise, the WG netdev after decrypting the reply will pass
// it to the stack which drops the packet).
if c.EnableNodePort && c.EncryptNode {
return true
}
return false
}
// MasqueradingEnabled returns true if either IPv4 or IPv6 masquerading is enabled.
func (c *DaemonConfig) MasqueradingEnabled() bool {
return c.EnableIPv4Masquerade || c.EnableIPv6Masquerade
}
// IptablesMasqueradingIPv4Enabled returns true if iptables-based
// masquerading is enabled for IPv4.
func (c *DaemonConfig) IptablesMasqueradingIPv4Enabled() bool {
return !c.EnableBPFMasquerade && c.EnableIPv4Masquerade
}
// IptablesMasqueradingIPv6Enabled returns true if iptables-based
// masquerading is enabled for IPv6.
func (c *DaemonConfig) IptablesMasqueradingIPv6Enabled() bool {
return !c.EnableBPFMasquerade && c.EnableIPv6Masquerade
}
// IptablesMasqueradingEnabled returns true if iptables-based
// masquerading is enabled.
func (c *DaemonConfig) IptablesMasqueradingEnabled() bool {
return c.IptablesMasqueradingIPv4Enabled() || c.IptablesMasqueradingIPv6Enabled()
}
// NodeIpsetNeeded returns true if node ipsets should be used to skip
// masquerading for traffic to cluster nodes.
func (c *DaemonConfig) NodeIpsetNeeded() bool {
return !c.TunnelingEnabled() && c.IptablesMasqueradingEnabled()
}
// NodeEncryptionEnabled returns true if node encryption is enabled
func (c *DaemonConfig) NodeEncryptionEnabled() bool {
return c.EncryptNode
}
// EncryptionEnabled returns true if encryption is enabled
func (c *DaemonConfig) EncryptionEnabled() bool {
return c.EnableIPSec
}
// IPv4Enabled returns true if IPv4 is enabled
func (c *DaemonConfig) IPv4Enabled() bool {
return c.EnableIPv4
}
// IPv6Enabled returns true if IPv6 is enabled
func (c *DaemonConfig) IPv6Enabled() bool {
return c.EnableIPv6
}
// LBProtoDiffEnabled returns true if LoadBalancerProtocolDifferentiation is enabled
func (c *DaemonConfig) LBProtoDiffEnabled() bool {
return c.LoadBalancerProtocolDifferentiation
}
// IPv6NDPEnabled returns true if IPv6 NDP support is enabled
func (c *DaemonConfig) IPv6NDPEnabled() bool {
return c.EnableIPv6NDP
}
// SCTPEnabled returns true if SCTP support is enabled
func (c *DaemonConfig) SCTPEnabled() bool {
return c.EnableSCTP
}
// HealthCheckingEnabled returns true if health checking is enabled
func (c *DaemonConfig) HealthCheckingEnabled() bool {
return c.EnableHealthChecking
}
// IPAMMode returns the IPAM mode
func (c *DaemonConfig) IPAMMode() string {
return strings.ToLower(c.IPAM)
}
// TracingEnabled returns true if policy tracing (outlining which rules apply
// to a specific set of labels) is enabled.
func (c *DaemonConfig) TracingEnabled() bool {
return c.Opts.IsEnabled(PolicyTracing)
}
// UnreachableRoutesEnabled returns true if unreachable routes is enabled
func (c *DaemonConfig) UnreachableRoutesEnabled() bool {
return c.EnableUnreachableRoutes
}
// CiliumNamespaceName returns the name of the namespace in which Cilium is
// deployed
func (c *DaemonConfig) CiliumNamespaceName() string {
return c.K8sNamespace
}
// AgentNotReadyNodeTaintValue returns the value of the taint key that cilium agents
// will manage on their nodes
func (c *DaemonConfig) AgentNotReadyNodeTaintValue() string {
if c.AgentNotReadyNodeTaintKey != "" {
return c.AgentNotReadyNodeTaintKey
} else {
return defaults.AgentNotReadyNodeTaint
}
}
// K8sNetworkPolicyEnabled returns true if cilium agent needs to support K8s NetworkPolicy, false otherwise.
func (c *DaemonConfig) K8sNetworkPolicyEnabled() bool {
return c.EnableK8sNetworkPolicy
}
func (c *DaemonConfig) PolicyCIDRMatchesNodes() bool {
for _, mode := range c.PolicyCIDRMatchMode {
if mode == "nodes" {
return true
}
}
return false
}
// PerNodeLabelsEnabled returns true if per-node labels feature
// is enabled
func (c *DaemonConfig) PerNodeLabelsEnabled() bool {
return c.EnableNodeSelectorLabels
}
func (c *DaemonConfig) validatePolicyCIDRMatchMode() error {
// Currently, the only acceptable value is "nodes".
for _, mode := range c.PolicyCIDRMatchMode {
switch mode {
case "nodes":
continue
default:
return fmt.Errorf("unknown CIDR match mode: %s", mode)
}
}
return nil
}
// DirectRoutingDeviceRequired returns whether the direct routing device is
// needed under the current configuration.
func (c *DaemonConfig) DirectRoutingDeviceRequired() bool {
// BPF NodePort and BPF host routing use the direct routing device.
// When tunneling is enabled, node-to-node redirection is done via the tunnel.
BPFHostRoutingEnabled := !c.EnableHostLegacyRouting
// XDP needs IPV4_DIRECT_ROUTING when building tunnel headers:
if c.EnableNodePort && c.NodePortAcceleration != NodePortAccelerationDisabled {
return true
}
return c.EnableNodePort || BPFHostRoutingEnabled || c.EnableWireguard
}
func (c *DaemonConfig) LoadBalancerUsesDSR() bool {
return c.NodePortMode == NodePortModeDSR ||
c.NodePortMode == NodePortModeHybrid ||
c.LoadBalancerModeAnnotation
}
// KVstoreEnabledWithoutPodNetworkSupport returns whether Cilium is configured to connect
// to an external KVStore, and the support for running it in pod network is disabled.
func (c *DaemonConfig) KVstoreEnabledWithoutPodNetworkSupport() bool {
return c.KVStore != "" && !c.KVstorePodNetworkSupport
}
func (c *DaemonConfig) validateIPv6ClusterAllocCIDR() error {
ip, cidr, err := net.ParseCIDR(c.IPv6ClusterAllocCIDR)
if err != nil {
return err
}
if ones, _ := cidr.Mask.Size(); ones != 64 {
return fmt.Errorf("Prefix length must be /64")
}
c.IPv6ClusterAllocCIDRBase = ip.Mask(cidr.Mask).String()
return nil
}
func (c *DaemonConfig) validateIPv6NAT46x64CIDR() error {
parsedPrefix, err := netip.ParsePrefix(c.IPv6NAT46x64CIDR)
if err != nil {
return err
}
if parsedPrefix.Bits() != 96 {
return fmt.Errorf("Prefix length must be /96")
}
c.IPv6NAT46x64CIDRBase = parsedPrefix.Masked().Addr()
return nil
}
func (c *DaemonConfig) validateContainerIPLocalReservedPorts() error {
if c.ContainerIPLocalReservedPorts == "" || c.ContainerIPLocalReservedPorts == defaults.ContainerIPLocalReservedPortsAuto {
return nil
}
if regexp.MustCompile(`^(\d+(-\d+)?)(,\d+(-\d+)?)*$`).MatchString(c.ContainerIPLocalReservedPorts) {
return nil
}
return fmt.Errorf("Invalid comma separated list of of ranges for %s option", ContainerIPLocalReservedPorts)
}
// Validate validates the daemon configuration
func (c *DaemonConfig) Validate(vp *viper.Viper) error {
if err := c.validateIPv6ClusterAllocCIDR(); err != nil {
return fmt.Errorf("unable to parse CIDR value '%s' of option --%s: %w",
c.IPv6ClusterAllocCIDR, IPv6ClusterAllocCIDRName, err)
}
if err := c.validateIPv6NAT46x64CIDR(); err != nil {
return fmt.Errorf("unable to parse internal CIDR value '%s': %w",
c.IPv6NAT46x64CIDR, err)
}
if c.MTU < 0 {
return fmt.Errorf("MTU '%d' cannot be negative", c.MTU)
}
if c.RouteMetric < 0 {
return fmt.Errorf("RouteMetric '%d' cannot be negative", c.RouteMetric)
}
if c.IPAM == ipamOption.IPAMENI && c.EnableIPv6 {
return fmt.Errorf("IPv6 cannot be enabled in ENI IPAM mode")
}
if c.EnableIPv6NDP {
if !c.EnableIPv6 {
return fmt.Errorf("IPv6NDP cannot be enabled when IPv6 is not enabled")
}
if len(c.IPv6MCastDevice) == 0 {
return fmt.Errorf("IPv6NDP cannot be enabled without %s", IPv6MCastDevice)
}
}
switch c.RoutingMode {
case RoutingModeNative, RoutingModeTunnel:
default:
return fmt.Errorf("invalid routing mode %q, valid modes = {%q, %q}",
c.RoutingMode, RoutingModeTunnel, RoutingModeNative)
}
cinfo := clustermeshTypes.ClusterInfo{
ID: c.ClusterID,
Name: c.ClusterName,
MaxConnectedClusters: c.MaxConnectedClusters,
}
if err := cinfo.InitClusterIDMax(); err != nil {
return err
}
if err := cinfo.Validate(); err != nil {
return err
}
if err := c.checkMapSizeLimits(); err != nil {
return err
}
if err := c.checkIPv4NativeRoutingCIDR(); err != nil {
return err
}
if err := c.checkIPv6NativeRoutingCIDR(); err != nil {
return err
}
if err := c.checkIPAMDelegatedPlugin(); err != nil {
return err
}
// Validate that the KVStore Lease TTL value lies between a particular range.
if c.KVstoreLeaseTTL > defaults.KVstoreLeaseMaxTTL || c.KVstoreLeaseTTL < defaults.LockLeaseTTL {
return fmt.Errorf("KVstoreLeaseTTL does not lie in required range(%ds, %ds)",
int64(defaults.LockLeaseTTL.Seconds()),
int64(defaults.KVstoreLeaseMaxTTL.Seconds()))
}
if c.EnableVTEP {
err := c.validateVTEP(vp)
if err != nil {
return fmt.Errorf("Failed to validate VTEP configuration: %w", err)
}
}
if err := c.validatePolicyCIDRMatchMode(); err != nil {
return err
}
if err := c.validateContainerIPLocalReservedPorts(); err != nil {
return err
}
return nil
}
// ReadDirConfig reads the given directory and returns a map that maps the
// filename to the contents of that file.
func ReadDirConfig(dirName string) (map[string]interface{}, error) {
m := map[string]interface{}{}
files, err := os.ReadDir(dirName)
if err != nil && !os.IsNotExist(err) {
return nil, fmt.Errorf("unable to read configuration directory: %w", err)
}
for _, f := range files {
if f.IsDir() {
continue
}
fName := filepath.Join(dirName, f.Name())
// the file can still be a symlink to a directory
if f.Type()&os.ModeSymlink == 0 {
absFileName, err := filepath.EvalSymlinks(fName)
if err != nil {
log.WithError(err).Warnf("Unable to read configuration file %q", absFileName)
continue
}
fName = absFileName
}
fi, err := os.Stat(fName)
if err != nil {
log.WithError(err).Warnf("Unable to read configuration file %q", fName)
continue
}
if fi.Mode().IsDir() {
continue
}
b, err := os.ReadFile(fName)
if err != nil {
log.WithError(err).Warnf("Unable to read configuration file %q", fName)
continue
}
m[f.Name()] = string(bytes.TrimSpace(b))
}
return m, nil
}
// MergeConfig merges the given configuration map with viper's configuration.
func MergeConfig(vp *viper.Viper, m map[string]interface{}) error {
err := vp.MergeConfigMap(m)
if err != nil {
return fmt.Errorf("unable to read merge directory configuration: %w", err)
}
return nil
}
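// loadDirConfigExample is an illustrative sketch, not part of the agent's
// startup flow: it shows how ReadDirConfig and MergeConfig are meant to
// compose when a ConfigMap is mounted as a directory. The directory path is
// an assumption made for the example only.
func loadDirConfigExample(vp *viper.Viper) error {
// Each regular file name becomes a configuration key and its trimmed
// contents become the value.
m, err := ReadDirConfig("/tmp/cilium/config-map")
if err != nil {
return err
}
return MergeConfig(vp, m)
}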
// ReplaceDeprecatedFields replaces the deprecated options set with the new set
// of options that overwrite the deprecated ones.
// This function replaces the deprecated fields used by environment variables
// with a different name than the option they are setting. It also replaces
// the deprecated names used in the Kubernetes ConfigMap.
// Once we remove them from this function we also need to remove them from
// daemon_main.go and warn users that neither the old environment variable nor
// the option in the configuration map has any effect.
func ReplaceDeprecatedFields(m map[string]interface{}) {
deprecatedFields := map[string]string{
"monitor-aggregation-level": MonitorAggregationName,
"ct-global-max-entries-tcp": CTMapEntriesGlobalTCPName,
"ct-global-max-entries-other": CTMapEntriesGlobalAnyName,
}
for deprecatedOption, newOption := range deprecatedFields {
if deprecatedValue, ok := m[deprecatedOption]; ok {
if _, ok := m[newOption]; !ok {
m[newOption] = deprecatedValue
}
}
}
}
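// replaceDeprecatedFieldsExample is an illustrative sketch, not used by the
// agent: it demonstrates that a deprecated key is copied to its new name only
// when the new name is absent, so explicitly set new options always win. The
// input value is assumed for the example.
func replaceDeprecatedFieldsExample() map[string]interface{} {
m := map[string]interface{}{
"ct-global-max-entries-tcp": "524288", // deprecated key name from the table above
}
ReplaceDeprecatedFields(m)
// m now also carries the same value under CTMapEntriesGlobalTCPName.
return m
}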
func (c *DaemonConfig) parseExcludedLocalAddresses(s []string) error {
for _, ipString := range s {
_, ipnet, err := net.ParseCIDR(ipString)
if err != nil {
return fmt.Errorf("unable to parse excluded local address %s: %w", ipString, err)
}
c.ExcludeLocalAddresses = append(c.ExcludeLocalAddresses, ipnet)
}
return nil
}
// SetupLogging sets all logging-related options with the values from viper,
// then sets up logging based on these options and the given tag.
//
// This allows initializing logging as early as possible, so that log entries
// produced later in Populate honor the requested logging configuration.
func (c *DaemonConfig) SetupLogging(vp *viper.Viper, tag string) {
c.Debug = vp.GetBool(DebugArg)
c.LogDriver = vp.GetStringSlice(LogDriver)
if m, err := command.GetStringMapStringE(vp, LogOpt); err != nil {
log.Fatalf("unable to parse %s: %s", LogOpt, err)
} else {
c.LogOpt = m
}
if err := logging.SetupLogging(c.LogDriver, logging.LogOptions(c.LogOpt), tag, c.Debug); err != nil {
log.Fatal(err)
}
}
// Populate sets all non-logging options with the values from viper.
//
// This function may emit logs. Consider calling SetupLogging before this
// to make sure that they honor logging-related options.
func (c *DaemonConfig) Populate(vp *viper.Viper) {
var err error
c.AgentHealthPort = vp.GetInt(AgentHealthPort)
c.ClusterHealthPort = vp.GetInt(ClusterHealthPort)
c.ClusterMeshHealthPort = vp.GetInt(ClusterMeshHealthPort)
c.AgentLabels = vp.GetStringSlice(AgentLabels)
c.AllowICMPFragNeeded = vp.GetBool(AllowICMPFragNeeded)
c.AllowLocalhost = vp.GetString(AllowLocalhost)
c.AnnotateK8sNode = vp.GetBool(AnnotateK8sNode)
c.ARPPingRefreshPeriod = vp.GetDuration(ARPPingRefreshPeriod)
c.EnableL2NeighDiscovery = vp.GetBool(EnableL2NeighDiscovery)
c.AutoCreateCiliumNodeResource = vp.GetBool(AutoCreateCiliumNodeResource)
c.BPFRoot = vp.GetString(BPFRoot)
c.CGroupRoot = vp.GetString(CGroupRoot)
c.ClusterID = vp.GetUint32(clustermeshTypes.OptClusterID)
c.ClusterName = vp.GetString(clustermeshTypes.OptClusterName)
c.MaxConnectedClusters = vp.GetUint32(clustermeshTypes.OptMaxConnectedClusters)
c.DatapathMode = vp.GetString(DatapathMode)
c.DebugVerbose = vp.GetStringSlice(DebugVerbose)
c.EnableIPv4 = vp.GetBool(EnableIPv4Name)
c.EnableIPv6 = vp.GetBool(EnableIPv6Name)
c.EnableIPv6NDP = vp.GetBool(EnableIPv6NDPName)
c.EnableSRv6 = vp.GetBool(EnableSRv6)
c.SRv6EncapMode = vp.GetString(SRv6EncapModeName)
c.EnableSCTP = vp.GetBool(EnableSCTPName)
c.IPv6MCastDevice = vp.GetString(IPv6MCastDevice)
c.EnableIPSec = vp.GetBool(EnableIPSecName)
c.EnableWireguard = vp.GetBool(EnableWireguard)
c.WireguardTrackAllIPsFallback = vp.GetBool(WireguardTrackAllIPsFallback)
c.EnableL2Announcements = vp.GetBool(EnableL2Announcements)
c.L2AnnouncerLeaseDuration = vp.GetDuration(L2AnnouncerLeaseDuration)
c.L2AnnouncerRenewDeadline = vp.GetDuration(L2AnnouncerRenewDeadline)
c.L2AnnouncerRetryPeriod = vp.GetDuration(L2AnnouncerRetryPeriod)
c.WireguardPersistentKeepalive = vp.GetDuration(WireguardPersistentKeepalive)
c.EnableXDPPrefilter = vp.GetBool(EnableXDPPrefilter)
c.EnableTCX = vp.GetBool(EnableTCX)
c.DisableCiliumEndpointCRD = vp.GetBool(DisableCiliumEndpointCRDName)
c.MasqueradeInterfaces = vp.GetStringSlice(MasqueradeInterfaces)
c.BPFSocketLBHostnsOnly = vp.GetBool(BPFSocketLBHostnsOnly)
c.EnableSocketLB = vp.GetBool(EnableSocketLB)
c.EnableSocketLBTracing = vp.GetBool(EnableSocketLBTracing)
c.EnableSocketLBPodConnectionTermination = vp.GetBool(EnableSocketLBPodConnectionTermination)
c.EnableBPFTProxy = vp.GetBool(EnableBPFTProxy)
c.EnableAutoDirectRouting = vp.GetBool(EnableAutoDirectRoutingName)
c.DirectRoutingSkipUnreachable = vp.GetBool(DirectRoutingSkipUnreachableName)
c.EnableEndpointRoutes = vp.GetBool(EnableEndpointRoutes)
c.EnableHealthChecking = vp.GetBool(EnableHealthChecking)
c.EnableEndpointHealthChecking = vp.GetBool(EnableEndpointHealthChecking)
c.EnableHealthCheckNodePort = vp.GetBool(EnableHealthCheckNodePort)
c.EnableHealthCheckLoadBalancerIP = vp.GetBool(EnableHealthCheckLoadBalancerIP)
c.HealthCheckICMPFailureThreshold = vp.GetInt(HealthCheckICMPFailureThreshold)
c.EnableLocalNodeRoute = vp.GetBool(EnableLocalNodeRoute)
c.EnablePolicy = strings.ToLower(vp.GetString(EnablePolicy))
c.EnableExternalIPs = vp.GetBool(EnableExternalIPs)
c.EnableL7Proxy = vp.GetBool(EnableL7Proxy)
c.EnableTracing = vp.GetBool(EnableTracing)
c.EnableIPIPTermination = vp.GetBool(EnableIPIPTermination)
c.EnableUnreachableRoutes = vp.GetBool(EnableUnreachableRoutes)
c.EnableNodePort = vp.GetBool(EnableNodePort)
c.EnableSVCSourceRangeCheck = vp.GetBool(EnableSVCSourceRangeCheck)
c.EnableHostPort = vp.GetBool(EnableHostPort)
c.EnableHostLegacyRouting = vp.GetBool(EnableHostLegacyRouting)
c.NodePortBindProtection = vp.GetBool(NodePortBindProtection)
c.EnableAutoProtectNodePortRange = vp.GetBool(EnableAutoProtectNodePortRange)
c.KubeProxyReplacement = vp.GetString(KubeProxyReplacement)
c.EnableSessionAffinity = vp.GetBool(EnableSessionAffinity)
c.EnableRecorder = vp.GetBool(EnableRecorder)
c.EnableMKE = vp.GetBool(EnableMKE)
c.CgroupPathMKE = vp.GetString(CgroupPathMKE)
c.EnableHostFirewall = vp.GetBool(EnableHostFirewall)
c.EnableLocalRedirectPolicy = vp.GetBool(EnableLocalRedirectPolicy)
c.EncryptInterface = vp.GetStringSlice(EncryptInterface)
c.EncryptNode = vp.GetBool(EncryptNode)
c.IdentityChangeGracePeriod = vp.GetDuration(IdentityChangeGracePeriod)
c.IdentityRestoreGracePeriod = vp.GetDuration(IdentityRestoreGracePeriod)
c.IPAM = vp.GetString(IPAM)
c.IPAMDefaultIPPool = vp.GetString(IPAMDefaultIPPool)
c.IPv4Range = vp.GetString(IPv4Range)
c.IPv4NodeAddr = vp.GetString(IPv4NodeAddr)
c.IPv4ServiceRange = vp.GetString(IPv4ServiceRange)
c.IPv6ClusterAllocCIDR = vp.GetString(IPv6ClusterAllocCIDRName)
c.IPv6NodeAddr = vp.GetString(IPv6NodeAddr)
c.IPv6Range = vp.GetString(IPv6Range)
c.IPv6ServiceRange = vp.GetString(IPv6ServiceRange)
c.JoinCluster = vp.GetBool(JoinClusterName)
c.K8sRequireIPv4PodCIDR = vp.GetBool(K8sRequireIPv4PodCIDRName)
c.K8sRequireIPv6PodCIDR = vp.GetBool(K8sRequireIPv6PodCIDRName)
c.K8sServiceCacheSize = uint(vp.GetInt(K8sServiceCacheSize))
c.K8sServiceDebounceBufferSize = vp.GetInt(K8sServiceDebounceBufferSize)
c.K8sServiceDebounceWaitTime = vp.GetDuration(K8sServiceDebounceWaitTime)
c.K8sSyncTimeout = vp.GetDuration(K8sSyncTimeoutName)
c.AllocatorListTimeout = vp.GetDuration(AllocatorListTimeoutName)
c.K8sWatcherEndpointSelector = vp.GetString(K8sWatcherEndpointSelector)
c.KeepConfig = vp.GetBool(KeepConfig)
c.KVStore = vp.GetString(KVStore)
c.KVstoreLeaseTTL = vp.GetDuration(KVstoreLeaseTTL)
c.KVstorePeriodicSync = vp.GetDuration(KVstorePeriodicSync)
c.KVstoreConnectivityTimeout = vp.GetDuration(KVstoreConnectivityTimeout)
c.KVstorePodNetworkSupport = vp.GetBool(KVstorePodNetworkSupport)
c.KVstoreMaxConsecutiveQuorumErrors = vp.GetUint(KVstoreMaxConsecutiveQuorumErrorsName)
c.LabelPrefixFile = vp.GetString(LabelPrefixFile)
c.Labels = vp.GetStringSlice(Labels)
c.LibDir = vp.GetString(LibDir)
c.LogSystemLoadConfig = vp.GetBool(LogSystemLoadConfigName)
c.LoopbackIPv4 = vp.GetString(LoopbackIPv4)
c.LocalRouterIPv4 = vp.GetString(LocalRouterIPv4)
c.LocalRouterIPv6 = vp.GetString(LocalRouterIPv6)
c.EnableBPFClockProbe = vp.GetBool(EnableBPFClockProbe)
c.EnableIPMasqAgent = vp.GetBool(EnableIPMasqAgent)
c.EnableIPv4EgressGateway = vp.GetBool(EnableIPv4EgressGateway)
c.EnableEnvoyConfig = vp.GetBool(EnableEnvoyConfig)
c.IPMasqAgentConfigPath = vp.GetString(IPMasqAgentConfigPath)
c.InstallIptRules = vp.GetBool(InstallIptRules)
c.IPSecKeyFile = vp.GetString(IPSecKeyFileName)
c.IPsecKeyRotationDuration = vp.GetDuration(IPsecKeyRotationDuration)
c.EnableIPsecKeyWatcher = vp.GetBool(EnableIPsecKeyWatcher)
c.EnableIPSecXfrmStateCaching = vp.GetBool(EnableIPSecXfrmStateCaching)
c.MonitorAggregation = vp.GetString(MonitorAggregationName)
c.MonitorAggregationInterval = vp.GetDuration(MonitorAggregationInterval)
c.MTU = vp.GetInt(MTUName)
c.PreAllocateMaps = vp.GetBool(PreAllocateMapsName)
c.ProcFs = vp.GetString(ProcFs)
c.RestoreState = vp.GetBool(Restore)
c.RouteMetric = vp.GetInt(RouteMetric)
c.RunDir = vp.GetString(StateDir)
c.ExternalEnvoyProxy = vp.GetBool(ExternalEnvoyProxy)
c.SocketPath = vp.GetString(SocketPath)
c.TracePayloadlen = vp.GetInt(TracePayloadlen)
c.Version = vp.GetString(Version)
c.PolicyTriggerInterval = vp.GetDuration(PolicyTriggerInterval)
c.CTMapEntriesTimeoutTCP = vp.GetDuration(CTMapEntriesTimeoutTCPName)
c.CTMapEntriesTimeoutAny = vp.GetDuration(CTMapEntriesTimeoutAnyName)
c.CTMapEntriesTimeoutSVCTCP = vp.GetDuration(CTMapEntriesTimeoutSVCTCPName)
c.CTMapEntriesTimeoutSVCTCPGrace = vp.GetDuration(CTMapEntriesTimeoutSVCTCPGraceName)
c.CTMapEntriesTimeoutSVCAny = vp.GetDuration(CTMapEntriesTimeoutSVCAnyName)
c.CTMapEntriesTimeoutSYN = vp.GetDuration(CTMapEntriesTimeoutSYNName)
c.CTMapEntriesTimeoutFIN = vp.GetDuration(CTMapEntriesTimeoutFINName)
c.PolicyAuditMode = vp.GetBool(PolicyAuditModeArg)
c.PolicyAccounting = vp.GetBool(PolicyAccountingArg)
c.EnableIPv4FragmentsTracking = vp.GetBool(EnableIPv4FragmentsTrackingName)
c.FragmentsMapEntries = vp.GetInt(FragmentsMapEntriesName)
c.CRDWaitTimeout = vp.GetDuration(CRDWaitTimeout)
c.LoadBalancerDSRDispatch = vp.GetString(LoadBalancerDSRDispatch)
c.LoadBalancerRSSv4CIDR = vp.GetString(LoadBalancerRSSv4CIDR)
c.LoadBalancerRSSv6CIDR = vp.GetString(LoadBalancerRSSv6CIDR)
c.InstallNoConntrackIptRules = vp.GetBool(InstallNoConntrackIptRules)
c.ContainerIPLocalReservedPorts = vp.GetString(ContainerIPLocalReservedPorts)
c.EnableCustomCalls = vp.GetBool(EnableCustomCallsName)
c.BGPSecretsNamespace = vp.GetString(BGPSecretsNamespace)
c.ExternalClusterIP = vp.GetBool(ExternalClusterIPName)
c.EnableNat46X64Gateway = vp.GetBool(EnableNat46X64Gateway)
c.EnableIPv4Masquerade = vp.GetBool(EnableIPv4Masquerade) && c.EnableIPv4
c.EnableIPv6Masquerade = vp.GetBool(EnableIPv6Masquerade) && c.EnableIPv6
c.EnableBPFMasquerade = vp.GetBool(EnableBPFMasquerade)
c.EnableMasqueradeRouteSource = vp.GetBool(EnableMasqueradeRouteSource)
c.EnablePMTUDiscovery = vp.GetBool(EnablePMTUDiscovery)
c.IPv6NAT46x64CIDR = defaults.IPv6NAT46x64CIDR
c.IPAMCiliumNodeUpdateRate = vp.GetDuration(IPAMCiliumNodeUpdateRate)
c.BPFEventsDropEnabled = vp.GetBool(BPFEventsDropEnabled)
c.BPFEventsPolicyVerdictEnabled = vp.GetBool(BPFEventsPolicyVerdictEnabled)
c.BPFEventsTraceEnabled = vp.GetBool(BPFEventsTraceEnabled)
c.BPFConntrackAccounting = vp.GetBool(BPFConntrackAccounting)
c.EnableIPSecEncryptedOverlay = vp.GetBool(EnableIPSecEncryptedOverlay)
c.LBSourceRangeAllTypes = vp.GetBool(LBSourceRangeAllTypes)
c.BootIDFile = vp.GetString(BootIDFilename)
c.ServiceNoBackendResponse = vp.GetString(ServiceNoBackendResponse)
switch c.ServiceNoBackendResponse {
case ServiceNoBackendResponseReject, ServiceNoBackendResponseDrop:
case "":
c.ServiceNoBackendResponse = defaults.ServiceNoBackendResponse
default:
log.Fatalf("Invalid value for --%s: %s (must be 'reject' or 'drop')", ServiceNoBackendResponse, c.ServiceNoBackendResponse)
}
c.populateLoadBalancerSettings(vp)
c.EnableRuntimeDeviceDetection = vp.GetBool(EnableRuntimeDeviceDetection)
c.EgressMultiHomeIPRuleCompat = vp.GetBool(EgressMultiHomeIPRuleCompat)
c.InstallUplinkRoutesForDelegatedIPAM = vp.GetBool(InstallUplinkRoutesForDelegatedIPAM)
vlanBPFBypassIDs := vp.GetStringSlice(VLANBPFBypass)
c.VLANBPFBypass = make([]int, 0, len(vlanBPFBypassIDs))
for _, vlanIDStr := range vlanBPFBypassIDs {
vlanID, err := strconv.Atoi(vlanIDStr)
if err != nil {
log.WithError(err).Fatalf("Cannot parse vlan ID integer from --%s option", VLANBPFBypass)
}
c.VLANBPFBypass = append(c.VLANBPFBypass, vlanID)
}
c.DisableExternalIPMitigation = vp.GetBool(DisableExternalIPMitigation)
tcFilterPrio := vp.GetUint32(TCFilterPriority)
if tcFilterPrio > math.MaxUint16 {
log.Fatalf("%s cannot be higher than %d", TCFilterPriority, math.MaxUint16)
}
c.TCFilterPriority = uint16(tcFilterPrio)
c.RoutingMode = vp.GetString(RoutingMode)
if vp.IsSet(AddressScopeMax) {
c.AddressScopeMax, err = ip.ParseScope(vp.GetString(AddressScopeMax))
if err != nil {
log.WithError(err).Fatalf("Cannot parse scope integer from --%s option", AddressScopeMax)
}
} else {
c.AddressScopeMax = defaults.AddressScopeMax
}
if c.EnableNat46X64Gateway {
if !c.EnableIPv4 || !c.EnableIPv6 {
log.Fatalf("--%s requires both --%s and --%s enabled",
EnableNat46X64Gateway, EnableIPv4Name, EnableIPv6Name)
}
}
encryptionStrictModeEnabled := vp.GetBool(EnableEncryptionStrictMode)
if encryptionStrictModeEnabled {
if c.EnableIPv6 {
log.Info("WireGuard encryption strict mode only supports IPv4. IPv6 traffic is not protected and can be leaked.")
}
strictCIDR := vp.GetString(EncryptionStrictModeCIDR)
c.EncryptionStrictModeCIDR, err = netip.ParsePrefix(strictCIDR)
if err != nil {
log.WithError(err).Fatalf("Cannot parse CIDR %s from --%s option", strictCIDR, EncryptionStrictModeCIDR)
}
if !c.EncryptionStrictModeCIDR.Addr().Is4() {
log.Fatalf("%s must be an IPv4 CIDR", EncryptionStrictModeCIDR)
}
c.EncryptionStrictModeAllowRemoteNodeIdentities = vp.GetBool(EncryptionStrictModeAllowRemoteNodeIdentities)
c.EnableEncryptionStrictMode = encryptionStrictModeEnabled
}
ipv4NativeRoutingCIDR := vp.GetString(IPv4NativeRoutingCIDR)
if ipv4NativeRoutingCIDR != "" {
c.IPv4NativeRoutingCIDR, err = cidr.ParseCIDR(ipv4NativeRoutingCIDR)
if err != nil {
log.WithError(err).Fatalf("Unable to parse CIDR '%s'", ipv4NativeRoutingCIDR)
}
if len(c.IPv4NativeRoutingCIDR.IP) != net.IPv4len {
log.Fatalf("%s must be an IPv4 CIDR", IPv4NativeRoutingCIDR)
}
}
ipv6NativeRoutingCIDR := vp.GetString(IPv6NativeRoutingCIDR)
if ipv6NativeRoutingCIDR != "" {
c.IPv6NativeRoutingCIDR, err = cidr.ParseCIDR(ipv6NativeRoutingCIDR)
if err != nil {
log.WithError(err).Fatalf("Unable to parse CIDR '%s'", ipv6NativeRoutingCIDR)
}
if len(c.IPv6NativeRoutingCIDR.IP) != net.IPv6len {
log.Fatalf("%s must be an IPv6 CIDR", IPv6NativeRoutingCIDR)
}
}
if c.DirectRoutingSkipUnreachable && !c.EnableAutoDirectRouting {
log.Fatalf("Flag %s cannot be enabled when %s is not enabled. As if %s is then enabled, it may lead to unexpected behaviour causing network connectivity issues.", DirectRoutingSkipUnreachableName, EnableAutoDirectRoutingName, EnableAutoDirectRoutingName)
}
if err := c.calculateBPFMapSizes(vp); err != nil {
log.Fatal(err)
}
c.ClockSource = ClockSourceKtime
c.EnableIdentityMark = vp.GetBool(EnableIdentityMark)
// toFQDNs options
c.DNSMaxIPsPerRestoredRule = vp.GetInt(DNSMaxIPsPerRestoredRule)
c.DNSPolicyUnloadOnShutdown = vp.GetBool(DNSPolicyUnloadOnShutdown)
c.FQDNRegexCompileLRUSize = vp.GetInt(FQDNRegexCompileLRUSize)
c.ToFQDNsMaxIPsPerHost = vp.GetInt(ToFQDNsMaxIPsPerHost)
if maxZombies := vp.GetInt(ToFQDNsMaxDeferredConnectionDeletes); maxZombies >= 0 {
c.ToFQDNsMaxDeferredConnectionDeletes = vp.GetInt(ToFQDNsMaxDeferredConnectionDeletes)
} else {
log.Fatalf("%s must be positive, or 0 to disable deferred connection deletion",
ToFQDNsMaxDeferredConnectionDeletes)
}
switch {
case vp.IsSet(ToFQDNsMinTTL): // set by user
c.ToFQDNsMinTTL = vp.GetInt(ToFQDNsMinTTL)
default:
c.ToFQDNsMinTTL = defaults.ToFQDNsMinTTL
}
c.ToFQDNsProxyPort = vp.GetInt(ToFQDNsProxyPort)
c.ToFQDNsPreCache = vp.GetString(ToFQDNsPreCache)
c.ToFQDNsEnableDNSCompression = vp.GetBool(ToFQDNsEnableDNSCompression)
c.ToFQDNsIdleConnectionGracePeriod = vp.GetDuration(ToFQDNsIdleConnectionGracePeriod)
c.FQDNProxyResponseMaxDelay = vp.GetDuration(FQDNProxyResponseMaxDelay)
c.DNSProxyConcurrencyLimit = vp.GetInt(DNSProxyConcurrencyLimit)
c.DNSProxyConcurrencyProcessingGracePeriod = vp.GetDuration(DNSProxyConcurrencyProcessingGracePeriod)
c.DNSProxyEnableTransparentMode = vp.GetBool(DNSProxyEnableTransparentMode)
c.DNSProxyInsecureSkipTransparentModeCheck = vp.GetBool(DNSProxyInsecureSkipTransparentModeCheck)
c.DNSProxyLockCount = vp.GetInt(DNSProxyLockCount)
c.DNSProxyLockTimeout = vp.GetDuration(DNSProxyLockTimeout)
c.DNSProxySocketLingerTimeout = vp.GetInt(DNSProxySocketLingerTimeout)
c.FQDNRejectResponse = vp.GetString(FQDNRejectResponseCode)
// Convert IP strings into net.IPNet types
subnets, invalid := ip.ParseCIDRs(vp.GetStringSlice(IPv4PodSubnets))
if len(invalid) > 0 {
log.WithFields(
logrus.Fields{
"Subnets": invalid,
}).Warning("IPv4PodSubnets parameter can not be parsed.")
}
c.IPv4PodSubnets = subnets
subnets, invalid = ip.ParseCIDRs(vp.GetStringSlice(IPv6PodSubnets))
if len(invalid) > 0 {
log.WithFields(
logrus.Fields{
"Subnets": invalid,
}).Warning("IPv6PodSubnets parameter can not be parsed.")
}
c.IPv6PodSubnets = subnets
err = c.populateNodePortRange(vp)
if err != nil {
log.WithError(err).Fatal("Failed to populate NodePortRange")
}
monitorAggregationFlags := vp.GetStringSlice(MonitorAggregationFlags)
var ctMonitorReportFlags uint16
for i := 0; i < len(monitorAggregationFlags); i++ {
value := strings.ToLower(monitorAggregationFlags[i])
flag, exists := TCPFlags[value]
if !exists {
log.Fatalf("Unable to parse TCP flag %q for %s!",
value, MonitorAggregationFlags)
}
ctMonitorReportFlags |= flag
}
c.MonitorAggregationFlags = ctMonitorReportFlags
// Map options
if m, err := command.GetStringMapStringE(vp, FixedIdentityMapping); err != nil {
log.Fatalf("unable to parse %s: %s", FixedIdentityMapping, err)
} else if len(m) != 0 {
c.FixedIdentityMapping = m
}
if m, err := command.GetStringMapStringE(vp, FixedZoneMapping); err != nil {
log.Fatalf("unable to parse %s: %s", FixedZoneMapping, err)
} else if len(m) != 0 {
forward := make(map[string]uint8, len(m))
reverse := make(map[uint8]string, len(m))
for k, v := range m {
bigN, _ := strconv.Atoi(v)
n := uint8(bigN)
if oldKey, ok := reverse[n]; ok && oldKey != k {
log.Fatalf("duplicate numeric ID entry for %s: %q and %q map to the same value %d", FixedZoneMapping, oldKey, k, n)
}
if oldN, ok := forward[k]; ok && oldN != n {
log.Fatalf("duplicate zone name entry for %s: %d and %d map to different values %s", FixedZoneMapping, oldN, n, k)
}
forward[k] = n
reverse[n] = k
}
c.FixedZoneMapping = forward
c.ReverseFixedZoneMapping = reverse
}
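// The zone mapping above, for illustration (input values assumed): a mapping
// {"zone-a": "1", "zone-b": "2"} yields FixedZoneMapping{"zone-a": 1, "zone-b": 2}
// and ReverseFixedZoneMapping{1: "zone-a", 2: "zone-b"}.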
c.ConntrackGCInterval = vp.GetDuration(ConntrackGCInterval)
c.ConntrackGCMaxInterval = vp.GetDuration(ConntrackGCMaxInterval)
if m, err := command.GetStringMapStringE(vp, KVStoreOpt); err != nil {
log.Fatalf("unable to parse %s: %s", KVStoreOpt, err)
} else {
c.KVStoreOpt = m
}
bpfEventsDefaultRateLimit := vp.GetUint32(BPFEventsDefaultRateLimit)
bpfEventsDefaultBurstLimit := vp.GetUint32(BPFEventsDefaultBurstLimit)
switch {
case bpfEventsDefaultRateLimit > 0 && bpfEventsDefaultBurstLimit == 0:
log.Fatalf("invalid BPF events default config: burst limit must also be specified when rate limit is provided")
case bpfEventsDefaultRateLimit == 0 && bpfEventsDefaultBurstLimit > 0:
log.Fatalf("invalid BPF events default config: rate limit must also be specified when burst limit is provided")
default:
c.BPFEventsDefaultRateLimit = vp.GetUint32(BPFEventsDefaultRateLimit)
c.BPFEventsDefaultBurstLimit = vp.GetUint32(BPFEventsDefaultBurstLimit)
}
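// For illustration (values assumed): a default rate limit of 100 events/s with
// a burst limit of 10 passes the check above, while setting only one of the
// two limits is rejected as an invalid BPF events default config.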
c.bpfMapEventConfigs = make(BPFEventBufferConfigs)
parseBPFMapEventConfigs(c.bpfMapEventConfigs, defaults.BPFEventBufferConfigs)
if m, err := command.GetStringMapStringE(vp, BPFMapEventBuffers); err != nil {
log.Fatalf("unable to parse %s: %s", BPFMapEventBuffers, err)
} else {
parseBPFMapEventConfigs(c.bpfMapEventConfigs, m)
}
c.NodeEncryptionOptOutLabelsString = vp.GetString(NodeEncryptionOptOutLabels)
if sel, err := k8sLabels.Parse(c.NodeEncryptionOptOutLabelsString); err != nil {
log.Fatalf("unable to parse label selector %s: %s", NodeEncryptionOptOutLabels, err)
} else {
c.NodeEncryptionOptOutLabels = sel
}
if err := c.parseExcludedLocalAddresses(vp.GetStringSlice(ExcludeLocalAddress)); err != nil {
log.WithError(err).Fatalf("Unable to parse excluded local addresses")
}
// Ensure CiliumEndpointSlice is enabled only if CiliumEndpointCRD is enabled too.
c.EnableCiliumEndpointSlice = vp.GetBool(EnableCiliumEndpointSlice)
if c.EnableCiliumEndpointSlice && c.DisableCiliumEndpointCRD {
log.Fatalf("Running Cilium with %s=%t requires %s set to false to enable CiliumEndpoint CRDs.",
EnableCiliumEndpointSlice, c.EnableCiliumEndpointSlice, DisableCiliumEndpointCRDName)
}
// To support K8s NetworkPolicy
c.EnableK8sNetworkPolicy = vp.GetBool(EnableK8sNetworkPolicy)
c.PolicyCIDRMatchMode = vp.GetStringSlice(PolicyCIDRMatchMode)
c.EnableNodeSelectorLabels = vp.GetBool(EnableNodeSelectorLabels)
c.NodeLabels = vp.GetStringSlice(NodeLabels)
c.EnableCiliumNetworkPolicy = vp.GetBool(EnableCiliumNetworkPolicy)
c.EnableCiliumClusterwideNetworkPolicy = vp.GetBool(EnableCiliumClusterwideNetworkPolicy)
c.IdentityAllocationMode = vp.GetString(IdentityAllocationMode)
switch c.IdentityAllocationMode {
// This is here for tests. Some call Populate without the normal init
case "":
c.IdentityAllocationMode = IdentityAllocationModeKVstore
case IdentityAllocationModeKVstore, IdentityAllocationModeCRD, IdentityAllocationModeDoubleWriteReadKVstore, IdentityAllocationModeDoubleWriteReadCRD:
// c.IdentityAllocationMode is set above
default:
log.Fatalf("Invalid identity allocation mode %q. It must be one of %s, %s or %s / %s", c.IdentityAllocationMode, IdentityAllocationModeKVstore, IdentityAllocationModeCRD, IdentityAllocationModeDoubleWriteReadKVstore, IdentityAllocationModeDoubleWriteReadCRD)
}
if c.KVStore == "" {
if c.IdentityAllocationMode != IdentityAllocationModeCRD {
log.Warningf("Running Cilium with %q=%q requires identity allocation via CRDs. Changing %s to %q", KVStore, c.KVStore, IdentityAllocationMode, IdentityAllocationModeCRD)
c.IdentityAllocationMode = IdentityAllocationModeCRD
}
if c.DisableCiliumEndpointCRD && NetworkPolicyEnabled(c) {
log.Warningf("Running Cilium with %q=%q requires endpoint CRDs when network policy enforcement system is enabled. Changing %s to %t", KVStore, c.KVStore, DisableCiliumEndpointCRDName, false)
c.DisableCiliumEndpointCRD = false
}
}
switch c.IPAM {
case ipamOption.IPAMKubernetes, ipamOption.IPAMClusterPool:
if c.EnableIPv4 {
c.K8sRequireIPv4PodCIDR = true
}
if c.EnableIPv6 {
c.K8sRequireIPv6PodCIDR = true
}
}
if m, err := command.GetStringMapStringE(vp, IPAMMultiPoolPreAllocation); err != nil {
log.Fatalf("unable to parse %s: %s", IPAMMultiPoolPreAllocation, err)
} else {
c.IPAMMultiPoolPreAllocation = m
}
if len(c.IPAMMultiPoolPreAllocation) == 0 {
// Default to the same value as IPAMDefaultIPPool
c.IPAMMultiPoolPreAllocation = map[string]string{c.IPAMDefaultIPPool: "8"}
}
c.KubeProxyReplacementHealthzBindAddr = vp.GetString(KubeProxyReplacementHealthzBindAddr)
// Hidden options
c.CompilerFlags = vp.GetStringSlice(CompilerFlags)
c.ConfigFile = vp.GetString(ConfigFile)
c.HTTP403Message = vp.GetString(HTTP403Message)
c.K8sNamespace = vp.GetString(K8sNamespaceName)
c.AgentNotReadyNodeTaintKey = vp.GetString(AgentNotReadyNodeTaintKeyName)
c.MaxControllerInterval = vp.GetInt(MaxCtrlIntervalName)
c.EndpointQueueSize = sanitizeIntParam(vp, EndpointQueueSize, defaults.EndpointQueueSize)
c.EnableICMPRules = vp.GetBool(EnableICMPRules)
c.UseCiliumInternalIPForIPsec = vp.GetBool(UseCiliumInternalIPForIPsec)
c.BypassIPAvailabilityUponRestore = vp.GetBool(BypassIPAvailabilityUponRestore)
c.EnableK8sTerminatingEndpoint = vp.GetBool(EnableK8sTerminatingEndpoint)
// VTEP integration enable option
c.EnableVTEP = vp.GetBool(EnableVTEP)
// Enable BGP control plane features
c.EnableBGPControlPlane = vp.GetBool(EnableBGPControlPlane)
// Enable BGP control plane status reporting
c.EnableBGPControlPlaneStatusReport = vp.GetBool(EnableBGPControlPlaneStatusReport)
// BGP router-id allocation mode in IPv6 standalone environment
c.BGPRouterIDAllocationMode = vp.GetString(BGPRouterIDAllocationMode)
// Support failure-mode for policy map overflow
c.EnableEndpointLockdownOnPolicyOverflow = vp.GetBool(EnableEndpointLockdownOnPolicyOverflow)
// Parse node label patterns
nodeLabelPatterns := vp.GetStringSlice(ExcludeNodeLabelPatterns)
for _, pattern := range nodeLabelPatterns {
r, err := regexp.Compile(pattern)
if err != nil {
log.WithError(err).Errorf("Unable to compile exclude node label regex pattern %s", pattern)
continue
}
c.ExcludeNodeLabelPatterns = append(c.ExcludeNodeLabelPatterns, r)
}
if c.KVStore != "" {
c.IdentityRestoreGracePeriod = defaults.IdentityRestoreGracePeriodKvstore
}
c.LoadBalancerProtocolDifferentiation = vp.GetBool(LoadBalancerProtocolDifferentiation)
c.EnableInternalTrafficPolicy = vp.GetBool(EnableInternalTrafficPolicy)
c.EnableSourceIPVerification = vp.GetBool(EnableSourceIPVerification)
}
func (c *DaemonConfig) populateLoadBalancerSettings(vp *viper.Viper) {
c.NodePortAcceleration = vp.GetString(LoadBalancerAcceleration)
c.NodePortMode = vp.GetString(LoadBalancerMode)
c.LoadBalancerModeAnnotation = vp.GetBool(LoadBalancerModeAnnotation)
c.NodePortAlg = vp.GetString(LoadBalancerAlgorithm)
c.LoadBalancerAlgorithmAnnotation = vp.GetBool(LoadBalancerAlgorithmAnnotation)
// If old settings were explicitly set by the user, then have them
// override the new ones in order to not break existing setups.
if vp.IsSet(NodePortAcceleration) {
prior := c.NodePortAcceleration
c.NodePortAcceleration = vp.GetString(NodePortAcceleration)
if vp.IsSet(LoadBalancerAcceleration) && prior != c.NodePortAcceleration {
log.Fatalf("Both --%s and --%s were set. Only use --%s instead.",
LoadBalancerAcceleration, NodePortAcceleration, LoadBalancerAcceleration)
}
}
if vp.IsSet(NodePortMode) {
prior := c.NodePortMode
c.NodePortMode = vp.GetString(NodePortMode)
if vp.IsSet(LoadBalancerMode) && prior != c.NodePortMode {
log.Fatalf("Both --%s and --%s were set. Only use --%s instead.",
LoadBalancerMode, NodePortMode, LoadBalancerMode)
}
}
if vp.IsSet(NodePortAlg) {
prior := c.NodePortAlg
c.NodePortAlg = vp.GetString(NodePortAlg)
if vp.IsSet(LoadBalancerAlgorithm) && prior != c.NodePortAlg {
log.Fatalf("Both --%s and --%s were set. Only use --%s instead.",
LoadBalancerAlgorithm, NodePortAlg, LoadBalancerAlgorithm)
}
}
}
func (c *DaemonConfig) populateNodePortRange(vp *viper.Viper) error {
nodePortRange := vp.GetStringSlice(NodePortRange)
// When passed via ConfigMap, we might not get a slice but a single
// string instead, so split it if needed.
if len(nodePortRange) == 1 {
nodePortRange = strings.Split(nodePortRange[0], ",")
}
switch len(nodePortRange) {
case 2:
var err error
c.NodePortMin, err = strconv.Atoi(nodePortRange[0])
if err != nil {
return fmt.Errorf("Unable to parse min port value for NodePort range: %w", err)
}
c.NodePortMax, err = strconv.Atoi(nodePortRange[1])
if err != nil {
return fmt.Errorf("Unable to parse max port value for NodePort range: %w", err)
}
if c.NodePortMax <= c.NodePortMin {
return errors.New("NodePort range min port must be smaller than max port")
}
case 0:
if vp.IsSet(NodePortRange) {
log.Warning("NodePort range was set but is empty.")
}
default:
return fmt.Errorf("Unable to parse min/max port value for NodePort range: %s", NodePortRange)
}
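// For illustration (values assumed, not defaults mandated here): a setting
// equivalent to "30000,32767" is split into NodePortMin=30000 and
// NodePortMax=32767 by the logic above; a single comma-joined string coming
// from a ConfigMap is handled the same way.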
return nil
}
func (c *DaemonConfig) checkMapSizeLimits() error {
if c.AuthMapEntries < AuthMapEntriesMin {
return fmt.Errorf("specified AuthMap max entries %d must exceed minimum %d", c.AuthMapEntries, AuthMapEntriesMin)
}
if c.AuthMapEntries > AuthMapEntriesMax {
return fmt.Errorf("specified AuthMap max entries %d must not exceed maximum %d", c.AuthMapEntries, AuthMapEntriesMax)
}
if c.CTMapEntriesGlobalTCP < LimitTableMin || c.CTMapEntriesGlobalAny < LimitTableMin {
return fmt.Errorf("specified CT tables values %d/%d must exceed minimum %d",
c.CTMapEntriesGlobalTCP, c.CTMapEntriesGlobalAny, LimitTableMin)
}
if c.CTMapEntriesGlobalTCP > LimitTableMax || c.CTMapEntriesGlobalAny > LimitTableMax {
return fmt.Errorf("specified CT tables values %d/%d must not exceed maximum %d",
c.CTMapEntriesGlobalTCP, c.CTMapEntriesGlobalAny, LimitTableMax)
}
if c.NATMapEntriesGlobal < LimitTableMin {
return fmt.Errorf("specified NAT table size %d must exceed minimum %d",
c.NATMapEntriesGlobal, LimitTableMin)
}
if c.NATMapEntriesGlobal > LimitTableMax {
return fmt.Errorf("specified NAT tables size %d must not exceed maximum %d",
c.NATMapEntriesGlobal, LimitTableMax)
}
if c.NATMapEntriesGlobal > c.CTMapEntriesGlobalTCP+c.CTMapEntriesGlobalAny {
if c.NATMapEntriesGlobal == NATMapEntriesGlobalDefault {
// Auto-size for the case where CT table size was adapted but NAT still on default
c.NATMapEntriesGlobal = int((c.CTMapEntriesGlobalTCP + c.CTMapEntriesGlobalAny) * 2 / 3)
} else {
return fmt.Errorf("specified NAT tables size %d must not exceed maximum CT table size %d",
c.NATMapEntriesGlobal, c.CTMapEntriesGlobalTCP+c.CTMapEntriesGlobalAny)
}
}
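// Worked example for the auto-sizing above (illustrative numbers): with
// CTMapEntriesGlobalTCP=524288 and CTMapEntriesGlobalAny=262144, a
// default-sized NAT map is shrunk to (524288+262144)*2/3 = 524288 entries.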
if c.SockRevNatEntries < LimitTableMin {
return fmt.Errorf("specified Socket Reverse NAT table size %d must exceed minimum %d",
c.SockRevNatEntries, LimitTableMin)
}
if c.SockRevNatEntries > LimitTableMax {
return fmt.Errorf("specified Socket Reverse NAT tables size %d must not exceed maximum %d",
c.SockRevNatEntries, LimitTableMax)
}
if c.PolicyMapEntries < PolicyMapMin {
return fmt.Errorf("specified PolicyMap max entries %d must exceed minimum %d",
c.PolicyMapEntries, PolicyMapMin)
}
if c.PolicyMapEntries > PolicyMapMax {
log.Warnf("specified PolicyMap max entries %d must not exceed maximum %d, lowering it to the maximum value",
c.PolicyMapEntries, PolicyMapMax)
c.PolicyMapEntries = PolicyMapMax
}
if c.FragmentsMapEntries < FragmentsMapMin {
return fmt.Errorf("specified max entries %d for fragment-tracking map must exceed minimum %d",
c.FragmentsMapEntries, FragmentsMapMin)
}
if c.FragmentsMapEntries > FragmentsMapMax {
return fmt.Errorf("specified max entries %d for fragment-tracking map must not exceed maximum %d",
c.FragmentsMapEntries, FragmentsMapMax)
}
if c.LBMapEntries <= 0 {
return fmt.Errorf("specified LBMap max entries %d must be a value greater than 0", c.LBMapEntries)
}
if c.LBServiceMapEntries < 0 ||
c.LBBackendMapEntries < 0 ||
c.LBRevNatEntries < 0 ||
c.LBAffinityMapEntries < 0 ||
c.LBSourceRangeMapEntries < 0 ||
c.LBMaglevMapEntries < 0 {
return fmt.Errorf("specified LB Service Map max entries must not be a negative value"+
"(Service Map: %d, Service Backend: %d, Reverse NAT: %d, Session Affinity: %d, Source Range: %d, Maglev: %d)",
c.LBServiceMapEntries,
c.LBBackendMapEntries,
c.LBRevNatEntries,
c.LBAffinityMapEntries,
c.LBSourceRangeMapEntries,
c.LBMaglevMapEntries)
}
return nil
}
func (c *DaemonConfig) checkIPv4NativeRoutingCIDR() error {
if c.IPv4NativeRoutingCIDR != nil {
return nil
}
if !c.EnableIPv4 || !c.EnableIPv4Masquerade {
return nil
}
if c.EnableIPMasqAgent {
return nil
}
if c.TunnelingEnabled() {
return nil
}
if c.IPAMMode() == ipamOption.IPAMENI || c.IPAMMode() == ipamOption.IPAMAlibabaCloud {
return nil
}
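// None of the exceptions apply: native routing with IPv4 masquerading and no
// ip-masq-agent requires an explicit CIDR, e.g. an RFC1918 aggregate such as
// 10.0.0.0/8 (illustrative value) supplied via the option named by
// IPv4NativeRoutingCIDR below.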
return fmt.Errorf(
"native routing cidr must be configured with option --%s "+
"in combination with --%s=true --%s=true --%s=false --%s=%s --%s=%s",
IPv4NativeRoutingCIDR,
EnableIPv4Name, EnableIPv4Masquerade,
EnableIPMasqAgent,
RoutingMode, RoutingModeNative,
IPAM, c.IPAMMode())
}
func (c *DaemonConfig) checkIPv6NativeRoutingCIDR() error {
if c.IPv6NativeRoutingCIDR != nil {
return nil
}
if !c.EnableIPv6 || !c.EnableIPv6Masquerade {
return nil
}
if c.EnableIPMasqAgent {
return nil
}
if c.TunnelingEnabled() {
return nil
}
return fmt.Errorf(
"native routing cidr must be configured with option --%s "+
"in combination with --%s=true --%s=true --%s=false --%s=%s",
IPv6NativeRoutingCIDR,
EnableIPv6Name, EnableIPv6Masquerade,
EnableIPMasqAgent,
RoutingMode, RoutingModeNative)
}
func (c *DaemonConfig) checkIPAMDelegatedPlugin() error {
if c.IPAM == ipamOption.IPAMDelegatedPlugin {
// When using IPAM delegated plugin, IP addresses are allocated by the CNI binary,
// not the daemon. Therefore, features which require the daemon to allocate IPs for itself
// must be disabled.
if c.EnableIPv4 && c.LocalRouterIPv4 == "" {
return fmt.Errorf("--%s must be provided when IPv4 is enabled with --%s=%s", LocalRouterIPv4, IPAM, ipamOption.IPAMDelegatedPlugin)
}
if c.EnableIPv6 && c.LocalRouterIPv6 == "" {
return fmt.Errorf("--%s must be provided when IPv6 is enabled with --%s=%s", LocalRouterIPv6, IPAM, ipamOption.IPAMDelegatedPlugin)
}
if c.EnableEndpointHealthChecking {
return fmt.Errorf("--%s must be disabled with --%s=%s", EnableEndpointHealthChecking, IPAM, ipamOption.IPAMDelegatedPlugin)
}
// Envoy config (Ingress, Gateway API, ...) requires cilium-agent to create an IP address
// specifically for differentiating Envoy traffic, which is not possible
// with delegated IPAM.
if c.EnableEnvoyConfig {
return fmt.Errorf("--%s must be disabled with --%s=%s", EnableEnvoyConfig, IPAM, ipamOption.IPAMDelegatedPlugin)
}
}
return nil
}
func (c *DaemonConfig) calculateBPFMapSizes(vp *viper.Viper) error {
// BPF map size options
// Any map size explicitly set via option will override the dynamic
// sizing.
c.AuthMapEntries = vp.GetInt(AuthMapEntriesName)
c.CTMapEntriesGlobalTCP = vp.GetInt(CTMapEntriesGlobalTCPName)
c.CTMapEntriesGlobalAny = vp.GetInt(CTMapEntriesGlobalAnyName)
c.NATMapEntriesGlobal = vp.GetInt(NATMapEntriesGlobalName)
c.NeighMapEntriesGlobal = vp.GetInt(NeighMapEntriesGlobalName)
c.PolicyMapEntries = vp.GetInt(PolicyMapEntriesName)
c.PolicyMapFullReconciliationInterval = vp.GetDuration(PolicyMapFullReconciliationIntervalName)
c.SockRevNatEntries = vp.GetInt(SockRevNatEntriesName)
c.LBMapEntries = vp.GetInt(LBMapEntriesName)
c.LBServiceMapEntries = vp.GetInt(LBServiceMapMaxEntries)
c.LBBackendMapEntries = vp.GetInt(LBBackendMapMaxEntries)
c.LBRevNatEntries = vp.GetInt(LBRevNatMapMaxEntries)
c.LBAffinityMapEntries = vp.GetInt(LBAffinityMapMaxEntries)
c.LBSourceRangeMapEntries = vp.GetInt(LBSourceRangeMapMaxEntries)
c.LBMaglevMapEntries = vp.GetInt(LBMaglevMapMaxEntries)
// Don't attempt dynamic sizing if any of the sizeof members was not
// populated by the daemon (or any other caller).
if c.SizeofCTElement == 0 ||
c.SizeofNATElement == 0 ||
c.SizeofNeighElement == 0 ||
c.SizeofSockRevElement == 0 {
return nil
}
// Allow the range (0.0, 1.0] because the dynamic size will anyway be
// clamped to the table limits. Thus, a ratio of e.g. 0.98 will not lead
// to 98% of the total memory being allocated for BPF maps.
dynamicSizeRatio := vp.GetFloat64(MapEntriesGlobalDynamicSizeRatioName)
if 0.0 < dynamicSizeRatio && dynamicSizeRatio <= 1.0 {
vms, err := memory.Get()
if err != nil || vms == nil {
log.WithError(err).Fatal("Failed to get system memory")
}
c.calculateDynamicBPFMapSizes(vp, vms.Total, dynamicSizeRatio)
c.BPFMapsDynamicSizeRatio = dynamicSizeRatio
} else if dynamicSizeRatio < 0.0 {
return fmt.Errorf("specified dynamic map size ratio %f must be > 0.0", dynamicSizeRatio)
} else if dynamicSizeRatio > 1.0 {
return fmt.Errorf("specified dynamic map size ratio %f must be ≤ 1.0", dynamicSizeRatio)
}
return nil
}
// SetMapElementSizes sets the BPF map element sizes (key + value) used for
// dynamic BPF map size calculations in calculateDynamicBPFMapSizes.
func (c *DaemonConfig) SetMapElementSizes(
sizeofCTElement,
sizeofNATElement,
sizeofNeighElement,
sizeofSockRevElement int) {
c.SizeofCTElement = sizeofCTElement
c.SizeofNATElement = sizeofNATElement
c.SizeofNeighElement = sizeofNeighElement
c.SizeofSockRevElement = sizeofSockRevElement
}
func (c *DaemonConfig) calculateDynamicBPFMapSizes(vp *viper.Viper, totalMemory uint64, dynamicSizeRatio float64) {
// Heuristic:
// Distribute the available memory among the different maps in proportion
// to their default entry counts. Cap each map size at its maximum. A map
// size provided by the user overrides both the calculated value and the
// maximum; the upper bound is checked later in DaemonConfig.Validate().
//
// Calculation examples:
//
// Memory CT TCP CT Any NAT
//
// 512MB 33140 16570 33140
// 1GB 66280 33140 66280
// 4GB 265121 132560 265121
// 16GB 1060485 530242 1060485
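// Spelled out, the formula implemented below is (a sketch of the code that
// follows, not an additional tuning knob):
//   entries(map) = defaultEntries(map) * (dynamicSizeRatio * totalMemory) /
//                  sum over maps of (defaultEntries * elementSize)
// with the result clamped to each map's [min, max] limits in getEntries.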
memoryAvailableForMaps := int(float64(totalMemory) * dynamicSizeRatio)
log.Infof("Memory available for map entries (%.3f%% of %dB): %dB", dynamicSizeRatio*100, totalMemory, memoryAvailableForMaps)
totalMapMemoryDefault := CTMapEntriesGlobalTCPDefault*c.SizeofCTElement +
CTMapEntriesGlobalAnyDefault*c.SizeofCTElement +
NATMapEntriesGlobalDefault*c.SizeofNATElement +
// Neigh table has the same number of entries as NAT Map has.
NATMapEntriesGlobalDefault*c.SizeofNeighElement +
SockRevNATMapEntriesDefault*c.SizeofSockRevElement
log.Debugf("Total memory for default map entries: %d", totalMapMemoryDefault)
getEntries := func(entriesDefault, min, max int) int {
entries := (entriesDefault * memoryAvailableForMaps) / totalMapMemoryDefault
if entries < min {
entries = min
} else if entries > max {
log.Debugf("clamped from %d to %d", entries, max)
entries = max
}
return entries
}
// If value for a particular map was explicitly set by an
// option, disable dynamic sizing for this map and use the
// provided size.
if !vp.IsSet(CTMapEntriesGlobalTCPName) {
c.CTMapEntriesGlobalTCP =
getEntries(CTMapEntriesGlobalTCPDefault, LimitTableAutoGlobalTCPMin, LimitTableMax)
log.Infof("option %s set by dynamic sizing to %v",
CTMapEntriesGlobalTCPName, c.CTMapEntriesGlobalTCP)
} else {
log.Debugf("option %s set by user to %v", CTMapEntriesGlobalTCPName, c.CTMapEntriesGlobalTCP)
}
if !vp.IsSet(CTMapEntriesGlobalAnyName) {
c.CTMapEntriesGlobalAny =
getEntries(CTMapEntriesGlobalAnyDefault, LimitTableAutoGlobalAnyMin, LimitTableMax)
log.Infof("option %s set by dynamic sizing to %v",
CTMapEntriesGlobalAnyName, c.CTMapEntriesGlobalAny)
} else {
log.Debugf("option %s set by user to %v", CTMapEntriesGlobalAnyName, c.CTMapEntriesGlobalAny)
}
if !vp.IsSet(NATMapEntriesGlobalName) {
c.NATMapEntriesGlobal =
getEntries(NATMapEntriesGlobalDefault, LimitTableAutoNatGlobalMin, LimitTableMax)
log.Infof("option %s set by dynamic sizing to %v",
NATMapEntriesGlobalName, c.NATMapEntriesGlobal)
if c.NATMapEntriesGlobal > c.CTMapEntriesGlobalTCP+c.CTMapEntriesGlobalAny {
// CT table size was specified manually, make sure that the NAT table size
// does not exceed maximum CT table size. See
// (*DaemonConfig).checkMapSizeLimits.
c.NATMapEntriesGlobal = (c.CTMapEntriesGlobalTCP + c.CTMapEntriesGlobalAny) * 2 / 3
log.Warningf("option %s would exceed maximum determined by CT table sizes, capping to %v",
NATMapEntriesGlobalName, c.NATMapEntriesGlobal)
}
} else {
log.Debugf("option %s set by user to %v", NATMapEntriesGlobalName, c.NATMapEntriesGlobal)
}
if !vp.IsSet(NeighMapEntriesGlobalName) {
// By default we auto-size it to the same value as the NAT map since we
// need to keep at least as many neigh entries.
c.NeighMapEntriesGlobal = c.NATMapEntriesGlobal
log.Infof("option %s set by dynamic sizing to %v",
NeighMapEntriesGlobalName, c.NeighMapEntriesGlobal)
} else {
log.Debugf("option %s set by user to %v", NeighMapEntriesGlobalName, c.NeighMapEntriesGlobal)
}
if !vp.IsSet(SockRevNatEntriesName) {
c.SockRevNatEntries =
getEntries(SockRevNATMapEntriesDefault, LimitTableAutoSockRevNatMin, LimitTableMax)
log.Infof("option %s set by dynamic sizing to %v",
SockRevNatEntriesName, c.SockRevNatEntries)
} else {
log.Debugf("option %s set by user to %v", NATMapEntriesGlobalName, c.NATMapEntriesGlobal)
}
}
// validateVTEP validates the VTEP integration configuration
func (c *DaemonConfig) validateVTEP(vp *viper.Viper) error {
vtepEndpoints := vp.GetStringSlice(VtepEndpoint)
vtepCIDRs := vp.GetStringSlice(VtepCIDR)
vtepCidrMask := vp.GetString(VtepMask)
vtepMACs := vp.GetStringSlice(VtepMAC)
if (len(vtepEndpoints) < 1) ||
len(vtepEndpoints) != len(vtepCIDRs) ||
len(vtepEndpoints) != len(vtepMACs) {
return fmt.Errorf("VTEP configuration must have the same number of Endpoint, VTEP and MAC configurations (Found %d endpoints, %d MACs, %d CIDR ranges)", len(vtepEndpoints), len(vtepMACs), len(vtepCIDRs))
}
if len(vtepEndpoints) > defaults.MaxVTEPDevices {
return fmt.Errorf("VTEP must not exceed %d VTEP devices (Found %d VTEPs)", defaults.MaxVTEPDevices, len(vtepEndpoints))
}
for _, ep := range vtepEndpoints {
endpoint := net.ParseIP(ep)
if endpoint == nil {
return fmt.Errorf("Invalid VTEP IP: %v", ep)
}
ip4 := endpoint.To4()
if ip4 == nil {
return fmt.Errorf("Invalid VTEP IPv4 address %v", ip4)
}
c.VtepEndpoints = append(c.VtepEndpoints, endpoint)
}
for _, v := range vtepCIDRs {
externalCIDR, err := cidr.ParseCIDR(v)
if err != nil {
return fmt.Errorf("Invalid VTEP CIDR: %v", v)
}
c.VtepCIDRs = append(c.VtepCIDRs, externalCIDR)
}
mask := net.ParseIP(vtepCidrMask)
if mask == nil {
return fmt.Errorf("Invalid VTEP CIDR Mask: %v", vtepCidrMask)
}
c.VtepCidrMask = mask
for _, m := range vtepMACs {
externalMAC, err := mac.ParseMAC(m)
if err != nil {
return fmt.Errorf("Invalid VTEP MAC: %v", m)
}
c.VtepMACs = append(c.VtepMACs, externalMAC)
}
return nil
}
// KubeProxyReplacementFullyEnabled returns true if Cilium is _effectively_
// running in full KPR mode.
func (c *DaemonConfig) KubeProxyReplacementFullyEnabled() bool {
return c.EnableHostPort &&
c.EnableNodePort &&
c.EnableExternalIPs &&
c.EnableSocketLB &&
c.EnableSessionAffinity
}
var backupFileNames []string = []string{
"agent-runtime-config.json",
"agent-runtime-config-1.json",
"agent-runtime-config-2.json",
}
// StoreInFile stores the configuration in the given directory under the file
// name 'agent-runtime-config.json'. If this file already exists, it is renamed
// to 'agent-runtime-config-1.json'; if 'agent-runtime-config-1.json' also
// exists, it is renamed to 'agent-runtime-config-2.json'.
// Caller is responsible for blocking concurrent changes.
func (c *DaemonConfig) StoreInFile(dir string) error {
backupFiles(dir, backupFileNames)
f, err := os.Create(backupFileNames[0])
if err != nil {
return err
}
defer f.Close()
e := json.NewEncoder(f)
e.SetIndent("", " ")
err = e.Encode(c)
c.shaSum = c.checksum()
return err
}
func (c *DaemonConfig) checksum() [32]byte {
// take a shallow copy for summing
sumConfig := *c
// Ignore variable parts
sumConfig.Opts = nil
sumConfig.EncryptInterface = nil
cBytes, err := json.Marshal(&sumConfig)
if err != nil {
return [32]byte{}
}
return sha256.Sum256(cBytes)
}
// ValidateUnchanged checks that invariable parts of the config have not changed since init.
// Caller is responsible for blocking concurrent changes.
func (c *DaemonConfig) ValidateUnchanged() error {
sum := c.checksum()
if sum != c.shaSum {
return c.diffFromFile()
}
return nil
}
func (c *DaemonConfig) diffFromFile() error {
// Read the last stored configuration in full; os.ReadFile closes the file
// and avoids the possibility of a short read.
fileBytes, err := os.ReadFile(backupFileNames[0])
if err != nil {
return err
}
var config DaemonConfig
err = json.Unmarshal(fileBytes, &config)
var diff string
if err != nil {
diff = fmt.Errorf("unmarshal failed %q: %w", string(fileBytes), err).Error()
} else {
// Ignore all unexported fields during Diff.
// from https://github.com/google/go-cmp/issues/313#issuecomment-1315651560
opts := cmp.FilterPath(func(p cmp.Path) bool {
sf, ok := p.Index(-1).(cmp.StructField)
if !ok {
return false
}
r, _ := utf8.DecodeRuneInString(sf.Name())
return !unicode.IsUpper(r)
}, cmp.Ignore())
diff = cmp.Diff(&config, c, opts,
cmpopts.IgnoreTypes(&IntOptions{}),
cmpopts.IgnoreTypes(&OptionLibrary{}),
cmpopts.IgnoreFields(DaemonConfig{}, "EncryptInterface"))
}
return fmt.Errorf("Config differs:\n%s", diff)
}
func (c *DaemonConfig) BGPControlPlaneEnabled() bool {
return c.EnableBGPControlPlane
}
func (c *DaemonConfig) IsDualStack() bool {
return c.EnableIPv4 && c.EnableIPv6
}
// IsLocalRouterIP checks if provided IP address matches either LocalRouterIPv4
// or LocalRouterIPv6
func (c *DaemonConfig) IsLocalRouterIP(ip string) bool {
return ip != "" && (c.LocalRouterIPv4 == ip || c.LocalRouterIPv6 == ip)
}
// StoreViperInFile stores viper's configuration in the given directory under
// the file name 'viper-agent-config.yaml'. If this file already exists, it is
// renamed to 'viper-agent-config-1.yaml'; if 'viper-agent-config-1.yaml' also
// exists, it is renamed to 'viper-agent-config-2.yaml'.
func StoreViperInFile(dir string) error {
backupFileNames := []string{
"viper-agent-config.yaml",
"viper-agent-config-1.yaml",
"viper-agent-config-2.yaml",
}
backupFiles(dir, backupFileNames)
return viper.WriteConfigAs(backupFileNames[0])
}
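// backupFiles rotates the given configuration files within dir, renaming each
// file to the next (older) name so that the first name is free for a fresh write.
//
// Illustrative effect, using the agent runtime config names above (a sketch):
//
//	agent-runtime-config-1.json -> agent-runtime-config-2.json
//	agent-runtime-config.json   -> agent-runtime-config-1.json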
func backupFiles(dir string, backupFilenames []string) {
for i := len(backupFilenames) - 1; i > 0; i-- {
newFileName := filepath.Join(dir, backupFilenames[i-1])
oldestFilename := filepath.Join(dir, backupFilenames[i])
if _, err := os.Stat(newFileName); os.IsNotExist(err) {
continue
}
err := os.Rename(newFileName, oldestFilename)
if err != nil {
log.WithError(err).WithFields(logrus.Fields{
"old-name": oldestFilename,
"new-name": newFileName,
}).Error("Unable to rename configuration files")
}
}
}
func sanitizeIntParam(vp *viper.Viper, paramName string, paramDefault int) int {
intParam := vp.GetInt(paramName)
if intParam <= 0 {
if vp.IsSet(paramName) {
log.WithFields(
logrus.Fields{
"parameter": paramName,
"defaultValue": paramDefault,
}).Warning("user-provided parameter had value <= 0 , which is invalid ; setting to default")
}
return paramDefault
}
return intParam
}
func validateConfigMapFlag(flag *pflag.Flag, key string, value interface{}) error {
var err error
switch t := flag.Value.Type(); t {
case "bool":
_, err = cast.ToBoolE(value)
case "duration":
_, err = cast.ToDurationE(value)
case "float32":
_, err = cast.ToFloat32E(value)
case "float64":
_, err = cast.ToFloat64E(value)
case "int":
_, err = cast.ToIntE(value)
case "int8":
_, err = cast.ToInt8E(value)
case "int16":
_, err = cast.ToInt16E(value)
case "int32":
_, err = cast.ToInt32E(value)
case "int64":
_, err = cast.ToInt64E(value)
case "map":
// custom type, see pkg/option/map_options.go
err = flag.Value.Set(fmt.Sprintf("%s", value))
case "stringSlice":
_, err = cast.ToStringSliceE(value)
case "string":
_, err = cast.ToStringE(value)
case "uint":
_, err = cast.ToUintE(value)
case "uint8":
_, err = cast.ToUint8E(value)
case "uint16":
_, err = cast.ToUint16E(value)
case "uint32":
_, err = cast.ToUint32E(value)
case "uint64":
_, err = cast.ToUint64E(value)
case "stringToString":
_, err = command.ToStringMapStringE(value)
default:
log.Warnf("Unable to validate option %s value of type %s", key, t)
}
return err
}
// validateConfigMap checks whether the flag exists and validates its value
func validateConfigMap(cmd *cobra.Command, m map[string]interface{}) error {
flags := cmd.Flags()
for key, value := range m {
flag := flags.Lookup(key)
if flag == nil {
continue
}
err := validateConfigMapFlag(flag, key, value)
if err != nil {
return fmt.Errorf("option %s: %w", key, err)
}
}
return nil
}
// InitConfig reads in config file and ENV variables if set.
func InitConfig(cmd *cobra.Command, programName, configName string, vp *viper.Viper) func() {
return func() {
if vp.GetBool("version") {
fmt.Printf("%s %s\n", programName, version.Version)
os.Exit(0)
}
if vp.GetString(CMDRef) != "" {
return
}
Config.ConfigFile = vp.GetString(ConfigFile) // enable ability to specify config file via flag
Config.ConfigDir = vp.GetString(ConfigDir)
vp.SetEnvPrefix("cilium")
if Config.ConfigDir != "" {
if _, err := os.Stat(Config.ConfigDir); os.IsNotExist(err) {
log.Fatalf("Non-existent configuration directory %s", Config.ConfigDir)
}
if m, err := ReadDirConfig(Config.ConfigDir); err != nil {
log.WithError(err).Fatalf("Unable to read configuration directory %s", Config.ConfigDir)
} else {
// replace deprecated fields with new fields
ReplaceDeprecatedFields(m)
// validate the config-map
if err := validateConfigMap(cmd, m); err != nil {
log.WithError(err).Fatal("Incorrect config-map flag value")
}
if err := MergeConfig(vp, m); err != nil {
log.WithError(err).Fatal("Unable to merge configuration")
}
}
}
if Config.ConfigFile != "" {
vp.SetConfigFile(Config.ConfigFile)
} else {
vp.SetConfigName(configName) // name of config file (without extension)
vp.AddConfigPath("$HOME") // adding home directory as first search path
}
// We need to check for the debug environment variable or CLI flag before
// loading the configuration file since on configuration file read failure
// we will emit a debug log entry.
if vp.GetBool(DebugArg) {
logging.SetLogLevelToDebug()
}
// If a config file is found, read it in.
if err := vp.ReadInConfig(); err == nil {
log.WithField(logfields.Path, vp.ConfigFileUsed()).
Info("Using config from file")
} else if Config.ConfigFile != "" {
log.WithField(logfields.Path, Config.ConfigFile).WithError(err).
Fatal("Error reading config file")
} else {
log.WithError(err).Debug("Skipped reading configuration file")
}
// Check for the debug flag again now that the configuration file may have
// been loaded, as it might have changed.
if vp.GetBool("debug") {
logging.SetLogLevelToDebug()
}
}
}
// BPFEventBufferConfig contains parsed configuration for a bpf map event buffer.
type BPFEventBufferConfig struct {
Enabled bool
MaxSize int
TTL time.Duration
}
// BPFEventBufferConfigs contains parsed bpf event buffer configs, indexed by map name.
type BPFEventBufferConfigs map[string]BPFEventBufferConfig
// GetEventBufferConfig returns the config for the given map name, or a default
// one with Enabled=false if no config exists for that map.
func (d *DaemonConfig) GetEventBufferConfig(name string) BPFEventBufferConfig {
return d.bpfMapEventConfigs.get(name)
}
func (cs BPFEventBufferConfigs) get(name string) BPFEventBufferConfig {
return cs[name]
}
// ParseEventBufferTupleString parses an event buffer configuration tuple string,
// for example "enabled,100,24h", which refers to enabled=true, maxSize=100 and
// ttl=24 hours.
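//
// Illustrative usage (a sketch):
//
//	conf, err := ParseEventBufferTupleString("enabled,1000,1h")
//	// on success: conf.Enabled == true, conf.MaxSize == 1000, conf.TTL == time.Hour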
func ParseEventBufferTupleString(optsStr string) (BPFEventBufferConfig, error) {
opts := strings.Split(optsStr, ",")
enabled := false
conf := BPFEventBufferConfig{}
if len(opts) != 3 {
return conf, fmt.Errorf("unexpected event buffer config value format, should be in format 'mapname=enabled,100,24h'")
}
if opts[0] != "enabled" && opts[0] != "disabled" {
return conf, fmt.Errorf("could not parse event buffer enabled: must be either 'enabled' or 'disabled'")
}
if opts[0] == "enabled" {
enabled = true
}
size, err := strconv.Atoi(opts[1])
if err != nil {
return conf, fmt.Errorf("could not parse event buffer maxSize int: %w", err)
}
ttl, err := time.ParseDuration(opts[2])
if err != nil {
return conf, fmt.Errorf("could not parse event buffer ttl duration: %w", err)
}
if size < 0 {
return conf, fmt.Errorf("event buffer max size cannot be less than zero (%d)", conf.MaxSize)
}
conf.TTL = ttl
conf.Enabled = enabled && size != 0
conf.MaxSize = size
return conf, nil
}
func parseBPFMapEventConfigs(confs BPFEventBufferConfigs, confMap map[string]string) error {
for name, confStr := range confMap {
conf, err := ParseEventBufferTupleString(confStr)
if err != nil {
return fmt.Errorf("unable to parse %s: %w", BPFMapEventBuffers, err)
}
confs[name] = conf
}
return nil
}
func (d *DaemonConfig) EnforceLXCFibLookup() bool {
// See https://github.com/cilium/cilium/issues/27343 for the symptoms.
//
// We want to enforce FIB lookup if EndpointRoutes are enabled, because
// this was a config dependency change which caused different behaviour
// since v1.14.0-snapshot.2. We will remove this hack later, once we
// have auto-device detection on by default.
return d.EnableEndpointRoutes
}
func (d *DaemonConfig) GetZone(id uint8) string {
return d.ReverseFixedZoneMapping[id]
}
func (d *DaemonConfig) GetZoneID(zone string) uint8 {
return d.FixedZoneMapping[zone]
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
var (
specPolicyTracing = Option{
Description: "Enable tracing when resolving policy (Debug)",
}
// DaemonOptionLibrary is the daemon's option library and should only be
// used for read access.
DaemonOptionLibrary = OptionLibrary{
PolicyTracing: &specPolicyTracing,
}
DaemonMutableOptionLibrary = OptionLibrary{
ConntrackAccounting: &specConntrackAccounting,
PolicyAccounting: &specPolicyAccounting,
ConntrackLocal: &specConntrackLocal,
Debug: &specDebug,
DebugLB: &specDebugLB,
DebugPolicy: &specDebugPolicy,
DropNotify: &specDropNotify,
TraceNotify: &specTraceNotify,
PolicyVerdictNotify: &specPolicyVerdictNotify,
PolicyAuditMode: &specPolicyAuditMode,
MonitorAggregation: &specMonitorAggregation,
SourceIPVerification: &specSourceIPVerification,
}
)
func init() {
for k, v := range DaemonMutableOptionLibrary {
DaemonOptionLibrary[k] = v
}
}
// ParseDaemonOption parses a string as daemon option
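//
// Illustrative usage (a sketch; PolicyTracing stands in for any known option key):
//
//	key, value, deprecated, err := ParseDaemonOption(PolicyTracing + "=true")
//	// on success: value == OptionEnabled, deprecated == false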
func ParseDaemonOption(opt string) (string, OptionSetting, bool, error) {
return DaemonOptionLibrary.ParseOption(opt)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
var (
endpointMutableOptionLibrary = OptionLibrary{
ConntrackAccounting: &specConntrackAccounting,
PolicyAccounting: &specPolicyAccounting,
ConntrackLocal: &specConntrackLocal,
Debug: &specDebug,
DebugLB: &specDebugLB,
DebugPolicy: &specDebugPolicy,
DropNotify: &specDropNotify,
TraceNotify: &specTraceNotify,
PolicyVerdictNotify: &specPolicyVerdictNotify,
PolicyAuditMode: &specPolicyAuditMode,
MonitorAggregation: &specMonitorAggregation,
SourceIPVerification: &specSourceIPVerification,
}
)
func GetEndpointMutableOptionLibrary() OptionLibrary {
opt := OptionLibrary{}
for k, v := range endpointMutableOptionLibrary {
opt[k] = v
}
return opt
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
import "cmp"
// NetworkPolicyEnabled returns true if the network policy enforcement
// system is enabled for K8s, Cilium and Cilium Clusterwide network policies.
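//
// Note: cmp.Or returns the first non-zero value among its arguments, so for the
// booleans listed below the result is true as soon as any condition holds,
// roughly equivalent to chaining them with "||".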
func NetworkPolicyEnabled(cfg *DaemonConfig) bool {
return cmp.Or(
cfg.EnablePolicy != NeverEnforce,
cfg.EnableK8sNetworkPolicy,
cfg.EnableCiliumNetworkPolicy,
cfg.EnableCiliumClusterwideNetworkPolicy,
!cfg.DisableCiliumEndpointCRD,
cfg.IdentityAllocationMode != IdentityAllocationModeCRD,
)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
import (
"fmt"
"strings"
)
// Validator returns a validated string along with a possible error.
type Validator func(val string) (string, error)
// MapOptions holds a map of values and a validation function.
type MapOptions struct {
vals map[string]string
validator Validator
}
// NamedMapOptions is a MapOptions struct with a configuration name.
// This struct is useful to keep reference to the assigned
// field name in the internal configuration struct.
type NamedMapOptions struct {
name string
MapOptions
}
// NewNamedMapOptions creates a reference to a new NamedMapOptions struct.
func NewNamedMapOptions(name string, values *map[string]string, validator Validator) *NamedMapOptions {
return &NamedMapOptions{
name: name,
MapOptions: *NewMapOpts(*values, validator),
}
}
// NewMapOpts creates a new MapOpts with the specified map of values and an
// optional validator.
func NewMapOpts(values map[string]string, validator Validator) *MapOptions {
if values == nil {
values = make(map[string]string)
}
return &MapOptions{
vals: values,
validator: validator,
}
}
func (opts *MapOptions) String() string {
var kvs []string
for k, v := range opts.vals {
kvs = append(kvs, fmt.Sprintf("%s=%s", k, v))
}
return strings.Join(kvs, ",")
}
// Type returns a string name for this Option type
func (opts *MapOptions) Type() string {
return "map"
}
// Set validates, if needed, the input value and adds it to the internal map,
// by splitting on '='.
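//
// Illustrative behavior (a sketch; the keys are hypothetical):
//
//	opts := NewMapOpts(nil, nil)
//	_ = opts.Set("key=value") // stored internally as "key" -> "value"
//	_ = opts.Set("key-only")  // stored internally as "key-only" -> ""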
func (opts *MapOptions) Set(value string) error {
if opts.validator != nil {
v, err := opts.validator(value)
if err != nil {
return err
}
value = v
}
vals := strings.SplitN(value, "=", 2)
if len(vals) == 1 {
(opts.vals)[vals[0]] = ""
} else {
(opts.vals)[vals[0]] = vals[1]
}
return nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
import (
"fmt"
"strconv"
"strings"
)
// MonitorAggregationLevel represents a level of aggregation for monitor events
// from the datapath. Low values represent little or no aggregation, meaning more
// events are emitted from the datapath; higher values represent more aggregation,
// minimizing the number of events emitted from the datapath.
//
// The MonitorAggregationLevel does not affect the Debug option in the daemon
// or endpoint, so debug notifications will continue uninhibited by this
// setting.
type MonitorAggregationLevel OptionSetting
const (
// MonitorAggregationLevelNone represents no aggregation in the
// datapath; all packets will be monitored.
MonitorAggregationLevelNone OptionSetting = 0
// MonitorAggregationLevelLowest represents aggregation of monitor events
// to emit a maximum of one trace event per packet. Trace events when
// packets are received are disabled.
MonitorAggregationLevelLowest OptionSetting = 1
// MonitorAggregationLevelLow is the same as
// MonitorAggregationLevelLowest, but may aggregate additional traffic
// in future.
MonitorAggregationLevelLow OptionSetting = 2
// MonitorAggregationLevelMedium represents aggregation of monitor
// events to only emit notifications periodically for each connection
// unless there is new information (eg, a TCP connection is closed).
MonitorAggregationLevelMedium OptionSetting = 3
// MonitorAggregationLevelMax is the maximum level of aggregation
// currently supported.
MonitorAggregationLevelMax OptionSetting = 4
)
// monitorAggregationOption maps a user-specified string to a monitor
// aggregation level.
var monitorAggregationOption = map[string]OptionSetting{
"": MonitorAggregationLevelNone,
"none": MonitorAggregationLevelNone,
"disabled": MonitorAggregationLevelNone,
"lowest": MonitorAggregationLevelLowest,
"low": MonitorAggregationLevelLow,
"medium": MonitorAggregationLevelMedium,
"max": MonitorAggregationLevelMax,
"maximum": MonitorAggregationLevelMax,
}
func init() {
for i := MonitorAggregationLevelNone; i <= MonitorAggregationLevelMax; i++ {
number := strconv.Itoa(int(i))
monitorAggregationOption[number] = OptionSetting(i)
}
}
// monitorAggregationFormat maps an aggregation level to a formatted string.
var monitorAggregationFormat = map[OptionSetting]string{
MonitorAggregationLevelNone: "None",
MonitorAggregationLevelLowest: "Lowest",
MonitorAggregationLevelLow: "Low",
MonitorAggregationLevelMedium: "Medium",
MonitorAggregationLevelMax: "Max",
}
// VerifyMonitorAggregationLevel validates the specified key/value for a
// monitor aggregation level.
func VerifyMonitorAggregationLevel(key, value string) error {
_, err := ParseMonitorAggregationLevel(value)
return err
}
// ParseMonitorAggregationLevel turns a string into a monitor aggregation
// level. The string may contain an integer value or a string representation of
// a particular monitor aggregation level.
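//
// Illustrative usage (a sketch):
//
//	level, err := ParseMonitorAggregationLevel("medium") // MonitorAggregationLevelMedium
//	level, err = ParseMonitorAggregationLevel("3")       // also MonitorAggregationLevelMedium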
func ParseMonitorAggregationLevel(value string) (OptionSetting, error) {
// First, attempt the string representation.
if level, ok := monitorAggregationOption[strings.ToLower(value)]; ok {
return level, nil
}
// If it's not a valid string option, attempt to parse an integer.
valueParsed, err := strconv.Atoi(value)
if err != nil {
err = fmt.Errorf("invalid monitor aggregation level %q", value)
return MonitorAggregationLevelNone, err
}
parsed := OptionSetting(valueParsed)
if parsed < MonitorAggregationLevelNone || parsed > MonitorAggregationLevelMax {
err = fmt.Errorf("monitor aggregation level must be between %d and %d",
MonitorAggregationLevelNone, MonitorAggregationLevelMax)
return MonitorAggregationLevelNone, err
}
return parsed, nil
}
// FormatMonitorAggregationLevel maps a MonitorAggregationLevel to a string.
func FormatMonitorAggregationLevel(level OptionSetting) string {
return monitorAggregationFormat[level]
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package option
import (
"encoding/json"
"fmt"
"slices"
"strings"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/lock"
)
// VerifyFunc validates option key with value and may return an error if the
// option should not be applied
type VerifyFunc func(key string, value string) error
// ParseFunc parses the option value and may return an error if the option
// cannot be parsed or applied.
type ParseFunc func(value string) (OptionSetting, error)
// FormatFunc formats the specified option value as its textual representation.
type FormatFunc func(value OptionSetting) string
// Option is the structure used to specify the semantics of a configurable
// boolean option
type Option struct {
// Define is the name of the #define used for BPF programs
Define string
// Description is a short human readable description
Description string
// Immutable marks an option which is read-only
Immutable bool
// Requires is a list of required options; such options will be
// automatically enabled as needed.
Requires []string
// Parse is called to parse the option. If not specified, defaults to
// NormalizeBool().
Parse ParseFunc
// FormatFunc is called to format the value for an option. If not
// specified, defaults to formatting 0 as "Disabled" and other values
// as "Enabled".
Format FormatFunc
// Verify is called prior to applying the option
Verify VerifyFunc
// Deprecated is true if this option is deprecated and a warning
// should be printed.
Deprecated bool
}
// OptionSetting specifies the different choices each Option has.
type OptionSetting int
const (
OptionDisabled OptionSetting = iota
OptionEnabled
)
// RequiresOption returns true if the option requires the specified option `name`.
func (o Option) RequiresOption(name string) bool {
for _, o := range o.Requires {
if o == name {
return true
}
}
return false
}
type OptionLibrary map[string]*Option
func (l OptionLibrary) Lookup(name string) (string, *Option) {
nameLower := strings.ToLower(name)
for k := range l {
if strings.ToLower(k) == nameLower {
return k, l[k]
}
}
return "", nil
}
func (l OptionLibrary) Define(name string) string {
if _, ok := l[name]; ok {
return l[name].Define
}
return name
}
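// NormalizeBool parses a human-readable boolean string into an OptionSetting.
//
// Illustrative usage (a sketch):
//
//	v, err := NormalizeBool("enable") // OptionEnabled
//	v, err = NormalizeBool("0")       // OptionDisabled
//	_, err = NormalizeBool("maybe")   // error: invalid option value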
func NormalizeBool(value string) (OptionSetting, error) {
switch strings.ToLower(value) {
case "true", "on", "enable", "enabled", "1":
return OptionEnabled, nil
case "false", "off", "disable", "disabled", "0":
return OptionDisabled, nil
default:
return OptionDisabled, fmt.Errorf("invalid option value %s", value)
}
}
// ValidateConfigurationMap validates a given configuration map based on the
// option library
func (l *OptionLibrary) ValidateConfigurationMap(n models.ConfigurationMap) (OptionMap, error) {
o := make(OptionMap)
for k, v := range n {
_, newVal, _, err := l.parseKeyValue(k, v)
if err != nil {
return nil, err
}
if err := l.Validate(k, v); err != nil {
return nil, err
}
o[k] = newVal
}
return o, nil
}
func (l OptionLibrary) Validate(name string, value string) error {
key, spec := l.Lookup(name)
if key == "" {
return fmt.Errorf("unknown option %s", name)
}
if spec.Immutable {
return fmt.Errorf("specified option is immutable (read-only)")
}
if spec.Verify != nil {
return spec.Verify(key, value)
}
return nil
}
type OptionMap map[string]OptionSetting
func (om OptionMap) DeepCopy() OptionMap {
cpy := make(OptionMap, len(om))
for k, v := range om {
cpy[k] = v
}
return cpy
}
// Exported IntOptions member functions handle their own locking and do not
// require locking by the caller, while unexported functions presume that the
// caller has taken care of any locking needed.
type IntOptions struct {
optsMU lock.RWMutex // Protects all variables from this structure below this line
opts OptionMap
library *OptionLibrary
}
// intOptions is only used for JSON
type intOptions struct {
Opts OptionMap `json:"map"`
}
// ValidateConfigurationMap validates a given configuration map based on the
// option library
func (o *IntOptions) ValidateConfigurationMap(n models.ConfigurationMap) (OptionMap, error) {
return o.library.ValidateConfigurationMap(n)
}
// Custom json marshal for unexported 'opts' while holding a read lock
func (o *IntOptions) MarshalJSON() ([]byte, error) {
o.optsMU.RLock()
defer o.optsMU.RUnlock()
return json.Marshal(&intOptions{
Opts: o.opts,
})
}
// Custom json unmarshal for unexported 'opts' while holding a write lock
func (o *IntOptions) UnmarshalJSON(b []byte) error {
o.optsMU.Lock()
defer o.optsMU.Unlock()
err := json.Unmarshal(b, &intOptions{
Opts: o.opts,
})
if err != nil {
return err
}
// Silently discard unsupported options
for k := range o.opts {
key, _ := o.library.Lookup(k)
if key == "" {
delete(o.opts, k)
}
}
return nil
}
// GetImmutableModel returns the set of immutable options as a ConfigurationMap API model.
func (o *IntOptions) GetImmutableModel() *models.ConfigurationMap {
immutableCfg := make(models.ConfigurationMap)
return &immutableCfg
}
// GetMutableModel returns the set of mutable options as a ConfigurationMap API model.
func (o *IntOptions) GetMutableModel() *models.ConfigurationMap {
mutableCfg := make(models.ConfigurationMap)
o.optsMU.RLock()
for k, v := range o.opts {
_, config := o.library.Lookup(k)
// It's possible that an option has since been removed and thus has
// no corresponding configuration; need to check if configuration is
// nil accordingly.
if config != nil {
if config.Format == nil {
if v == OptionDisabled {
mutableCfg[k] = "Disabled"
} else {
mutableCfg[k] = "Enabled"
}
} else {
mutableCfg[k] = config.Format(v)
}
}
}
o.optsMU.RUnlock()
return &mutableCfg
}
func (o *IntOptions) DeepCopy() *IntOptions {
o.optsMU.RLock()
cpy := &IntOptions{
opts: o.opts.DeepCopy(),
library: o.library,
}
o.optsMU.RUnlock()
return cpy
}
func NewIntOptions(lib *OptionLibrary) *IntOptions {
return &IntOptions{
opts: OptionMap{},
library: lib,
}
}
func (o *IntOptions) getValue(key string) OptionSetting {
value, exists := o.opts[key]
if !exists {
return OptionDisabled
}
return value
}
func (o *IntOptions) GetValue(key string) OptionSetting {
o.optsMU.RLock()
v := o.getValue(key)
o.optsMU.RUnlock()
return v
}
func (o *IntOptions) IsEnabled(key string) bool {
return o.GetValue(key) != OptionDisabled
}
// SetValidated sets the option `key` to the specified value. The caller is
// expected to have validated the input to this function.
func (o *IntOptions) SetValidated(key string, value OptionSetting) {
o.optsMU.Lock()
o.opts[key] = value
o.optsMU.Unlock()
}
// SetBool sets the specified option to Enabled.
func (o *IntOptions) SetBool(key string, value bool) {
intValue := OptionDisabled
if value {
intValue = OptionEnabled
}
o.optsMU.Lock()
o.opts[key] = intValue
o.optsMU.Unlock()
}
func (o *IntOptions) Delete(key string) {
o.optsMU.Lock()
delete(o.opts, key)
o.optsMU.Unlock()
}
func (o *IntOptions) SetIfUnset(key string, value OptionSetting) {
o.optsMU.Lock()
if _, exists := o.opts[key]; !exists {
o.opts[key] = value
}
o.optsMU.Unlock()
}
func (o *IntOptions) InheritDefault(parent *IntOptions, key string) {
o.optsMU.RLock()
o.opts[key] = parent.GetValue(key)
o.optsMU.RUnlock()
}
func (l *OptionLibrary) ParseOption(arg string) (string, OptionSetting, bool, error) {
result := OptionEnabled
if arg[0] == '!' {
result = OptionDisabled
arg = arg[1:]
}
optionSplit := strings.SplitN(arg, "=", 2)
arg = optionSplit[0]
if len(optionSplit) > 1 {
if result == OptionDisabled {
return "", OptionDisabled, false, fmt.Errorf("invalid boolean format")
}
return l.parseKeyValue(arg, optionSplit[1])
}
return "", OptionDisabled, false, fmt.Errorf("invalid option format")
}
func (l *OptionLibrary) parseKeyValue(arg, value string) (string, OptionSetting, bool, error) {
var result OptionSetting
key, spec := l.Lookup(arg)
if key == "" {
return "", OptionDisabled, false, fmt.Errorf("unknown option %q", arg)
}
var err error
if spec.Parse != nil {
result, err = spec.Parse(value)
} else {
result, err = NormalizeBool(value)
}
if err != nil {
return "", OptionDisabled, false, err
}
if spec.Immutable {
return "", OptionDisabled, spec.Deprecated, fmt.Errorf("specified option is immutable (read-only)")
}
return key, result, spec.Deprecated, nil
}
// getFmtOpt returns "#define <name> <value>" if the option exists and is enabled
// in the endpoint's Opts map, or "#undef <name>" if the option does not exist or
// is disabled.
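//
// Illustrative output (a sketch; CFG_EXAMPLE is a hypothetical #define name):
//
//	"#define CFG_EXAMPLE 1" // option present and enabled
//	"#undef CFG_EXAMPLE"    // option absent or disabled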
func (o *IntOptions) getFmtOpt(name string) string {
define := o.library.Define(name)
if define == "" {
return ""
}
value := o.getValue(name)
if value != OptionDisabled {
return fmt.Sprintf("#define %s %d", o.library.Define(name), value)
}
return "#undef " + o.library.Define(name)
}
func (o *IntOptions) GetFmtList() string {
txt := ""
o.optsMU.RLock()
opts := make([]string, 0, len(o.opts))
for k := range o.opts {
opts = append(opts, k)
}
slices.Sort(opts)
for _, k := range opts {
def := o.getFmtOpt(k)
if def != "" {
txt += def + "\n"
}
}
o.optsMU.RUnlock()
return txt
}
func (o *IntOptions) Dump() {
if o == nil {
return
}
o.optsMU.RLock()
opts := make([]string, 0, len(o.opts))
for k := range o.opts {
opts = append(opts, k)
}
slices.Sort(opts)
for _, k := range opts {
var text string
_, option := o.library.Lookup(k)
if option == nil || option.Format == nil {
if o.opts[k] == OptionDisabled {
text = "Disabled"
} else {
text = "Enabled"
}
} else {
text = option.Format(o.opts[k])
}
fmt.Printf("%-24s %s\n", k, text)
}
o.optsMU.RUnlock()
}
// Validate validates a given configuration map based on the option library
func (o *IntOptions) Validate(n models.ConfigurationMap) error {
o.optsMU.RLock()
defer o.optsMU.RUnlock()
for k, v := range n {
_, newVal, _, err := o.library.parseKeyValue(k, v)
if err != nil {
return err
}
// Ignore validation if value is identical
if oldVal, ok := o.opts[k]; ok && oldVal == newVal {
continue
}
if err := o.library.Validate(k, v); err != nil {
return err
}
}
return nil
}
// ChangedFunc is called by `Apply()` for each option changed
type ChangedFunc func(key string, value OptionSetting, data interface{})
// enable enables the option `name` with all its dependencies
func (o *IntOptions) enable(name string) {
if o.library != nil {
if _, opt := o.library.Lookup(name); opt != nil {
for _, dependency := range opt.Requires {
o.enable(dependency)
}
}
}
o.opts[name] = OptionEnabled
}
// set enables the option `name` with all its dependencies, and sets the
// integer level of the option to `value`.
func (o *IntOptions) set(name string, value OptionSetting) {
o.enable(name)
o.opts[name] = value
}
// disable disables the option `name`. All options which depend on the option
// to be disabled will be disabled. Options which have previously been enabled
// as a dependency will not be automatically disabled.
func (o *IntOptions) disable(name string) {
o.opts[name] = OptionDisabled
if o.library != nil {
// Disable all options which have a dependency on the option
// that was just disabled
for key, opt := range *o.library {
if opt.RequiresOption(name) && o.opts[key] != OptionDisabled {
o.disable(key)
}
}
}
}
type changedOptions struct {
key string
value OptionSetting
}
// ApplyValidated takes a configuration map and applies the changes. For an
// option which is changed, the `ChangedFunc` function is called with the
// `data` argument passed in as well. Returns the number of options changed if
// any.
//
// The caller is expected to have validated the configuration options prior to
// calling this function.
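//
// Illustrative usage (a sketch; "Debug" stands in for any mutable option key and
// opts is an *IntOptions created with NewIntOptions):
//
//	changedCount := opts.ApplyValidated(OptionMap{"Debug": OptionEnabled},
//		func(key string, value OptionSetting, data interface{}) {
//			// invoked once per option whose effective value changed
//		}, nil)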
func (o *IntOptions) ApplyValidated(n OptionMap, changed ChangedFunc, data interface{}) int {
changes := make([]changedOptions, 0, len(n))
o.optsMU.Lock()
for k, optVal := range n {
val, ok := o.opts[k]
if optVal == OptionDisabled {
/* Only disable if enabled already */
if ok && val != OptionDisabled {
o.disable(k)
changes = append(changes, changedOptions{key: k, value: optVal})
}
} else {
/* Only enable if not enabled already */
if !ok || val == OptionDisabled {
o.set(k, optVal)
changes = append(changes, changedOptions{key: k, value: optVal})
}
}
}
o.optsMU.Unlock()
for _, change := range changes {
changed(change.key, change.value, data)
}
return len(changes)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"net/netip"
"github.com/cilium/cilium/pkg/ip"
"github.com/cilium/cilium/pkg/policy/api"
"k8s.io/apimachinery/pkg/util/sets"
)
// getPrefixesFromCIDR fetches all CIDRs referred to by the specified slice
// and returns them as netip.Prefix values.
func getPrefixesFromCIDR(cidrs api.CIDRSlice) []netip.Prefix {
result, _, _ := ip.ParsePrefixes(cidrs.StringSlice())
return result
}
// GetPrefixesFromCIDRSet fetches all CIDRs referred to by the specified slice
// and returns them as netip.Prefix values. Includes CIDRs listed in
// ExceptCIDRs fields.
//
// Assumes that validation already occurred on 'rules'.
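//
// Illustrative usage (a sketch; the CIDR values are hypothetical):
//
//	rules := api.CIDRRuleSlice{{Cidr: "10.0.0.0/8", ExceptCIDRs: []api.CIDR{"10.96.0.0/12"}}}
//	prefixes := GetPrefixesFromCIDRSet(rules) // contains both 10.0.0.0/8 and 10.96.0.0/12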
func GetPrefixesFromCIDRSet(rules api.CIDRRuleSlice) []netip.Prefix {
out := make([]netip.Prefix, 0, len(rules))
for _, rule := range rules {
if rule.Cidr != "" {
pfx, err := netip.ParsePrefix(string(rule.Cidr))
if err == nil {
// must parse, was already validated.
out = append(out, pfx.Masked())
}
}
for _, except := range rule.ExceptCIDRs {
pfx, err := netip.ParsePrefix(string(except))
if err == nil {
out = append(out, pfx.Masked())
}
}
}
return out
}
// GetCIDRPrefixes runs through the specified 'rules' to find every reference
// to a CIDR in the rules, and returns a slice containing all of these CIDRs.
//
// Includes prefixes referenced solely by "ExceptCIDRs" entries.
//
// Assumes that validation already occurred on 'rules'.
func GetCIDRPrefixes(rules api.Rules) []netip.Prefix {
if len(rules) == 0 {
return nil
}
res := make(sets.Set[netip.Prefix], 32)
for _, r := range rules {
for _, ir := range r.Ingress {
if len(ir.FromCIDR) > 0 {
res.Insert(getPrefixesFromCIDR(ir.FromCIDR)...)
}
if len(ir.FromCIDRSet) > 0 {
res.Insert(GetPrefixesFromCIDRSet(ir.FromCIDRSet)...)
}
}
for _, ir := range r.IngressDeny {
if len(ir.FromCIDR) > 0 {
res.Insert(getPrefixesFromCIDR(ir.FromCIDR)...)
}
if len(ir.FromCIDRSet) > 0 {
res.Insert(GetPrefixesFromCIDRSet(ir.FromCIDRSet)...)
}
}
for _, er := range r.Egress {
if len(er.ToCIDR) > 0 {
res.Insert(getPrefixesFromCIDR(er.ToCIDR)...)
}
if len(er.ToCIDRSet) > 0 {
res.Insert(GetPrefixesFromCIDRSet(er.ToCIDRSet)...)
}
}
for _, er := range r.EgressDeny {
if len(er.ToCIDR) > 0 {
res.Insert(getPrefixesFromCIDR(er.ToCIDR)...)
}
if len(er.ToCIDRSet) > 0 {
res.Insert(GetPrefixesFromCIDRSet(er.ToCIDRSet)...)
}
}
}
return res.UnsortedList()
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
ipcacheTypes "github.com/cilium/cilium/pkg/ipcache/types"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/source"
"github.com/cilium/cilium/pkg/time"
)
var (
log = logging.DefaultLogger.WithField(logfields.LogSubsys, "policy")
mutex lock.RWMutex // Protects enablePolicy
enablePolicy string // Policy enforcement mode (see SetPolicyEnabled).
)
// SetPolicyEnabled sets the policy enablement configuration. Valid values are:
// - endpoint.AlwaysEnforce
// - endpoint.NeverEnforce
// - endpoint.DefaultEnforcement
func SetPolicyEnabled(val string) {
mutex.Lock()
enablePolicy = val
mutex.Unlock()
}
// GetPolicyEnabled returns the policy enablement configuration
func GetPolicyEnabled() string {
mutex.RLock()
val := enablePolicy
mutex.RUnlock()
return val
}
// AddOptions are options which can be passed to PolicyAdd
type AddOptions struct {
// Replace if true indicates that existing rules with identical labels should be replaced
Replace bool
// ReplaceWithLabels if present indicates that existing rules with the
// given LabelArray should be deleted.
ReplaceWithLabels labels.LabelArray
// Generated should be set to true to signal that the policy being inserted
// was generated by cilium-agent, e.g. the DNS poller.
Generated bool
// The source of this policy, one of api, fqdn or k8s
Source source.Source
// The time the policy initially began to be processed in Cilium, such as when the
// policy was received from the API server.
ProcessingStartTime time.Time
// Resource provides the object ID for the underlying object that backs
// this information from 'source'.
Resource ipcacheTypes.ResourceID
// ReplaceByResource indicates the policy repository should replace any
// rules owned by the given Resource with the new set of rules
ReplaceByResource bool
}
// DeleteOptions are options which can be passed to PolicyDelete
type DeleteOptions struct {
// The source of this policy, one of api, fqdn or k8s
Source source.Source
// Resource provides the object ID for the underlying object that backs
// this information from 'source'.
Resource ipcacheTypes.ResourceID
// DeleteByResource should be true if the resource should be used to identify
// which rules should be deleted.
DeleteByResource bool
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"sync/atomic"
"github.com/cilium/cilium/pkg/container/versioned"
identityPkg "github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/identity/identitymanager"
"github.com/cilium/cilium/pkg/lock"
)
// policyCache represents a cache of resolved policies for identities.
type policyCache struct {
lock.Mutex
// repo is a circular reference back to the Repository, but as
// we create only one Repository and one PolicyCache for each
// Cilium Agent process, these will never need to be garbage
// collected.
repo *Repository
policies map[identityPkg.NumericIdentity]*cachedSelectorPolicy
}
// newPolicyCache creates a new cache of SelectorPolicy.
func newPolicyCache(repo *Repository, idmgr identitymanager.IDManager) *policyCache {
cache := &policyCache{
repo: repo,
policies: make(map[identityPkg.NumericIdentity]*cachedSelectorPolicy),
}
if idmgr != nil {
idmgr.Subscribe(cache)
}
return cache
}
// lookupOrCreate returns the cachedSelectorPolicy for the specified Identity,
// adding it to the policy cache if it is not yet present.
func (cache *policyCache) lookupOrCreate(identity *identityPkg.Identity) *cachedSelectorPolicy {
cache.Lock()
defer cache.Unlock()
cip, ok := cache.policies[identity.ID]
if !ok {
cip = newCachedSelectorPolicy(identity)
cache.policies[identity.ID] = cip
}
return cip
}
// delete forgets about any cached SelectorPolicy for the given identity.
//
// Returns true if the SelectorPolicy was removed from the cache.
func (cache *policyCache) delete(identity *identityPkg.Identity) bool {
cache.Lock()
defer cache.Unlock()
cip, ok := cache.policies[identity.ID]
if ok {
delete(cache.policies, identity.ID)
cip.getPolicy().Detach()
}
return ok
}
// updateSelectorPolicy resolves the policy for the security identity of the
// specified endpoint and stores it internally. It will skip policy resolution
// if the cached policy is already at the revision specified in the repo.
//
// Returns whether the cache was updated, or an error.
//
// Must be called with repo.Mutex held for reading.
func (cache *policyCache) updateSelectorPolicy(identity *identityPkg.Identity) (*selectorPolicy, bool, error) {
cip := cache.lookupOrCreate(identity)
// As long as UpdatePolicy() is triggered from endpoint
// regeneration, it's possible for two endpoints with the
// *same* identity to race to update the policy here. Such
// racing would lead to first of the endpoints using a
// selectorPolicy that is already detached from the selector
// cache, and thus not getting any incremental updates.
//
// Lock the 'cip' for the duration of the revision check and
// the possible policy update.
cip.Lock()
defer cip.Unlock()
// Don't resolve policy if it was already done for this or later revision.
if selPolicy := cip.getPolicy(); selPolicy != nil && selPolicy.Revision >= cache.repo.GetRevision() {
return selPolicy, false, nil
}
// Resolve the policies, which could fail
selPolicy, err := cache.repo.resolvePolicyLocked(identity)
if err != nil {
return nil, false, err
}
cip.setPolicy(selPolicy)
return selPolicy, true, nil
}
// LocalEndpointIdentityAdded is not needed; we only care about local endpoint
// deletion
func (cache *policyCache) LocalEndpointIdentityAdded(identity *identityPkg.Identity) {
}
// LocalEndpointIdentityRemoved deletes the cached SelectorPolicy for the
// specified Identity.
func (cache *policyCache) LocalEndpointIdentityRemoved(identity *identityPkg.Identity) {
cache.delete(identity)
}
// getAuthTypes returns the AuthTypes required by the policy between the localID and remoteID, if
// any, otherwise returns nil.
func (cache *policyCache) getAuthTypes(localID, remoteID identityPkg.NumericIdentity) AuthTypes {
cache.Lock()
cip, ok := cache.policies[localID]
cache.Unlock()
if !ok {
return nil // No policy for localID (no endpoint with localID)
}
// SelectorPolicy is const after it has been created, so no locking needed to access it
selPolicy := cip.getPolicy()
var resTypes AuthTypes
for cs, authTypes := range selPolicy.L4Policy.authMap {
missing := false
for authType := range authTypes {
if _, exists := resTypes[authType]; !exists {
missing = true
break
}
}
// Only check if 'cs' selects 'remoteID' if one of the authTypes is still missing
// from the result
if missing && cs.Selects(versioned.Latest(), remoteID) {
if resTypes == nil {
resTypes = make(AuthTypes, 1)
}
for authType := range authTypes {
resTypes[authType] = struct{}{}
}
}
}
return resTypes
}
// cachedSelectorPolicy is a wrapper around a selectorPolicy (stored in the
// 'policy' field). It is always nested directly in the owning policyCache,
// and is protected against concurrent writes via the policyCache mutex.
type cachedSelectorPolicy struct {
lock.Mutex // lock is needed to synchronize parallel policy updates
identity *identityPkg.Identity
policy atomic.Pointer[selectorPolicy]
}
func newCachedSelectorPolicy(identity *identityPkg.Identity) *cachedSelectorPolicy {
cip := &cachedSelectorPolicy{
identity: identity,
}
return cip
}
// getPolicy returns a reference to the selectorPolicy that is cached.
//
// Users should treat the result as immutable state that MUST NOT be modified.
func (cip *cachedSelectorPolicy) getPolicy() *selectorPolicy {
return cip.policy.Load()
}
// setPolicy updates the reference to the SelectorPolicy that is cached.
// Calls Detach() on the old policy, if any.
func (cip *cachedSelectorPolicy) setPolicy(policy *selectorPolicy) {
oldPolicy := cip.policy.Swap(policy)
if oldPolicy != nil {
// Release the references the previous policy holds on the selector cache.
oldPolicy.Detach()
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"bytes"
"errors"
"fmt"
"io"
stdlog "log"
"maps"
"net/netip"
"strings"
"sync"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/policy/types"
"github.com/cilium/cilium/pkg/testutils"
)
const (
AuthTypeSpire = types.AuthTypeSpire
AuthTypeAlwaysFail = types.AuthTypeAlwaysFail
AuthTypeDisabled = types.AuthTypeDisabled
)
var (
ep1 = testutils.NewTestEndpoint()
ep2 = testutils.NewTestEndpoint()
)
func localIdentity(n uint32) identity.NumericIdentity {
return identity.NumericIdentity(n) | identity.IdentityScopeLocal
}
func TestCacheManagement(t *testing.T) {
repo := NewPolicyRepository(nil, nil, nil, nil, api.NewPolicyMetricsNoop())
cache := repo.policyCache
identity := ep1.GetSecurityIdentity()
require.Equal(t, identity, ep2.GetSecurityIdentity())
// Nonsense delete of entry that isn't yet inserted
deleted := cache.delete(identity)
require.False(t, deleted)
// Insert identity twice. Should be the same policy.
policy1, updated, err := cache.updateSelectorPolicy(identity)
require.NoError(t, err)
require.True(t, updated)
policy2, updated, err := cache.updateSelectorPolicy(identity)
require.NoError(t, err)
require.False(t, updated)
// must be same pointer
require.Same(t, policy2, policy1)
// Despite two insert calls, there is no reference tracking; any delete
// will clear the cache.
cacheCleared := cache.delete(identity)
require.True(t, cacheCleared)
cacheCleared = cache.delete(identity)
require.False(t, cacheCleared)
// Insert two distinct identities, then delete one. Other should still
// be there.
ep3 := testutils.NewTestEndpoint()
ep3.SetIdentity(1234, true)
identity3 := ep3.GetSecurityIdentity()
require.NotEqual(t, identity, identity3)
policy1, _, _ = cache.updateSelectorPolicy(identity)
require.NotNil(t, policy1)
policy3, _, _ := cache.updateSelectorPolicy(identity3)
require.NotNil(t, policy3)
require.NotSame(t, policy3, policy1)
_ = cache.delete(identity)
_, updated, _ = cache.updateSelectorPolicy(identity3)
require.False(t, updated)
}
func TestCachePopulation(t *testing.T) {
repo := NewPolicyRepository(nil, nil, nil, nil, api.NewPolicyMetricsNoop())
repo.revision.Store(42)
cache := repo.policyCache
identity1 := ep1.GetSecurityIdentity()
require.Equal(t, identity1, ep2.GetSecurityIdentity())
// Calculate the policy and observe that it's cached
policy1, updated, err := cache.updateSelectorPolicy(identity1)
require.NoError(t, err)
require.True(t, updated)
_, updated, err = cache.updateSelectorPolicy(identity1)
require.NoError(t, err)
require.False(t, updated)
policy2, _, _ := cache.updateSelectorPolicy(identity1)
require.NotNil(t, policy2)
require.Same(t, policy1, policy2)
// Remove the identity and observe that it is no longer available
cacheCleared := cache.delete(identity1)
require.True(t, cacheCleared)
_, updated, _ = cache.updateSelectorPolicy(identity1)
require.True(t, updated)
// Update policy for a distinct, not-yet-cached identity and observe that a
// fresh policy is computed and cached.
ep3 := testutils.NewTestEndpoint()
ep3.SetIdentity(1234, true)
policy3, updated, err := cache.updateSelectorPolicy(ep3.GetSecurityIdentity())
require.NoError(t, err)
require.True(t, updated)
// policy3 must be different from ep1, ep2
require.NoError(t, err)
require.NotEqual(t, policy1, policy3)
}
// Distillery integration tests
var (
// Identity, labels, selectors for an endpoint named "foo"
identityFoo = identity.NumericIdentity(100)
labelsFoo = labels.ParseSelectLabelArray("foo", "blue")
selectFoo_ = api.NewESFromLabels(labels.ParseSelectLabel("foo"))
allowFooL3_ = selectFoo_
denyFooL3__ = selectFoo_
// Identity, labels, selectors for an endpoint named "bar"
identityBar = identity.NumericIdentity(200)
labelsBar = labels.ParseSelectLabelArray("bar", "blue")
selectBar_ = api.NewESFromLabels(labels.ParseSelectLabel("bar"))
allowBarL3_ = selectBar_
// API rule sections for composability
// L4 rule sections
allowAllL4_ []api.PortRule
allowPort80 = []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}}
allowNamedPort80 = []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "port-80", Protocol: api.ProtoTCP},
},
}}
denyAllL4_ []api.PortDenyRule
denyPort80 = []api.PortDenyRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}}
// L7 rule sections
allowHTTPRoot = &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
}
// API rule definitions for default-deny, L3, L3L4, L3L4L7, L4, L4L7
lbls____NoAllow = labels.ParseLabelArray("no-allow")
rule____NoAllow = api.NewRule().
WithLabels(lbls____NoAllow).
WithIngressRules([]api.IngressRule{{}})
lblsL3____Allow = labels.ParseLabelArray("l3-allow")
ruleL3____Allow = api.NewRule().
WithLabels(lblsL3____Allow).
WithIngressRules([]api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{allowFooL3_},
},
ToPorts: allowAllL4_,
}})
lblsL3L4__Allow = labels.ParseLabelArray("l3l4-allow")
ruleL3L4__Allow = api.NewRule().
WithLabels(lblsL3L4__Allow).
WithIngressRules([]api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{allowFooL3_},
},
ToPorts: allowPort80,
}})
ruleL3npL4__Allow = api.NewRule().
WithLabels(lblsL3L4__Allow).
WithIngressRules([]api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{allowFooL3_},
},
ToPorts: allowNamedPort80,
}})
lblsL3L4L7Allow = labels.ParseLabelArray("l3l4l7-allow")
ruleL3L4L7Allow = api.NewRule().
WithLabels(lblsL3L4L7Allow).
WithIngressRules([]api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{allowFooL3_},
},
ToPorts: combineL4L7(allowPort80, allowHTTPRoot),
}})
ruleL3npL4L7Allow = api.NewRule().
WithLabels(lblsL3L4L7Allow).
WithIngressRules([]api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{allowFooL3_},
},
ToPorts: combineL4L7(allowNamedPort80, allowHTTPRoot),
}})
lbls__L4__Allow = labels.ParseLabelArray("l4-allow")
rule__L4__Allow = api.NewRule().
WithLabels(lbls__L4__Allow).
WithIngressRules([]api.IngressRule{{
ToPorts: allowPort80,
}})
rule__L4__AllowAuth = api.NewRule().
WithLabels(lbls__L4__Allow).
WithIngressRules([]api.IngressRule{{
ToPorts: allowPort80,
Authentication: &api.Authentication{
Mode: api.AuthenticationModeRequired,
},
}})
rule__npL4__Allow = api.NewRule().
WithLabels(lbls__L4__Allow).
WithIngressRules([]api.IngressRule{{
ToPorts: allowNamedPort80,
}})
lbls__L4L7Allow = labels.ParseLabelArray("l4l7-allow")
rule__L4L7Allow = api.NewRule().
WithLabels(lbls__L4L7Allow).
WithIngressRules([]api.IngressRule{{
ToPorts: combineL4L7(allowPort80, allowHTTPRoot),
}})
rule__npL4L7Allow = api.NewRule().
WithLabels(lbls__L4L7Allow).
WithIngressRules([]api.IngressRule{{
ToPorts: combineL4L7(allowNamedPort80, allowHTTPRoot),
}})
lblsL3__AllowFoo = labels.ParseLabelArray("l3-allow-foo")
ruleL3__AllowFoo = api.NewRule().
WithLabels(lblsL3__AllowFoo).
WithIngressRules([]api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{allowFooL3_},
},
}})
lblsL3__AllowBar = labels.ParseLabelArray("l3-allow-bar")
ruleL3__AllowBar = api.NewRule().
WithLabels(lblsL3__AllowBar).
WithIngressRules([]api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{allowBarL3_},
},
}})
lblsL3L4AllowBar = labels.ParseLabelArray("l3l4-allow-bar")
ruleL3L4AllowBarAuth = api.NewRule().
WithLabels(lblsL3L4AllowBar).
WithIngressRules([]api.IngressRule{{
ToPorts: allowPort80,
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{allowBarL3_},
},
Authentication: &api.Authentication{
Mode: api.AuthenticationModeAlwaysFail,
},
}})
ruleL3__AllowBarAuth = api.NewRule().
WithLabels(lblsL3__AllowBar).
WithIngressRules([]api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{allowBarL3_},
},
Authentication: &api.Authentication{
Mode: api.AuthenticationModeAlwaysFail,
},
}})
lbls____AllowAll = labels.ParseLabelArray("allow-all")
rule____AllowAll = api.NewRule().
WithLabels(lbls____AllowAll).
WithIngressRules([]api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
}})
rule____AllowAllAuth = api.NewRule().
WithLabels(lbls____AllowAll).
WithIngressRules([]api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
Authentication: &api.Authentication{
Mode: api.AuthenticationModeRequired,
},
}})
lblsAllowAllIngress = labels.LabelArray{
labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyIngress, labels.LabelSourceReserved),
}
lbls_____NoDeny = labels.ParseLabelArray("deny")
rule_____NoDeny = api.NewRule().
WithLabels(lbls_____NoDeny).
WithIngressRules([]api.IngressRule{{}})
lblsL3_____Deny = labels.ParseLabelArray("l3-deny")
ruleL3_____Deny = api.NewRule().
WithLabels(lblsL3_____Deny).
WithIngressDenyRules([]api.IngressDenyRule{{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{denyFooL3__},
},
ToPorts: denyAllL4_,
}})
lbls__L4___Deny = labels.ParseLabelArray("l4-deny")
rule__L4___Deny = api.NewRule().
WithLabels(lbls__L4___Deny).
WithIngressDenyRules([]api.IngressDenyRule{{
ToPorts: denyPort80,
}})
lblsL3L4___Deny = labels.ParseLabelArray("l3l4-deny")
ruleL3L4___Deny = api.NewRule().
WithLabels(lblsL3L4___Deny).
WithIngressDenyRules([]api.IngressDenyRule{{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{denyFooL3__},
},
ToPorts: denyPort80,
}})
// Desired map keys for L3, L3-dependent L4, L4
mapKeyAllowFoo__ = IngressKey().WithIdentity(identityFoo)
mapKeyAllowBar__ = IngressKey().WithIdentity(identityBar)
mapKeyAllowBarL4 = IngressKey().WithIdentity(identityBar).WithTCPPort(80)
mapKeyAllowFooL4 = IngressKey().WithIdentity(identityFoo).WithTCPPort(80)
mapKeyDeny_Foo__ = mapKeyAllowFoo__
mapKeyDeny_FooL4 = mapKeyAllowFooL4
mapKeyAllow___L4 = IngressKey().WithTCPPort(80)
mapKeyDeny____L4 = mapKeyAllow___L4
mapKeyAllowAll__ = IngressKey()
mapKeyAllowAllE_ = EgressKey()
// Desired map entries for no L7 redirect / redirect to Proxy
mapEntryL7None_ = func(lbls ...labels.LabelArray) mapStateEntry {
return allowEntry().withLabels(lbls)
}
mapEntryL7ExplicitAuth_ = func(at AuthType, lbls ...labels.LabelArray) mapStateEntry {
return allowEntry().withLabels(lbls).withExplicitAuth(at)
}
mapEntryL7DerivedAuth_ = func(at AuthType, lbls ...labels.LabelArray) mapStateEntry {
return allowEntry().withLabels(lbls).withDerivedAuth(at)
}
mapEntryL7Deny = func(lbls ...labels.LabelArray) mapStateEntry {
return denyEntry().withLabels(lbls)
}
mapEntryL7Proxy = func(lbls ...labels.LabelArray) mapStateEntry {
return allowEntry().withLabels(lbls).withProxyPort(1)
}
)
// combineL4L7 returns a new PortRule that refers to the specified l4 ports and
// l7 rules.
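//
// For example (using the fixtures above), combineL4L7(allowPort80, allowHTTPRoot)
// yields a single PortRule whose Ports allow TCP/80 and whose Rules carry the
// HTTP GET "/" rule.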
func combineL4L7(l4 []api.PortRule, l7 *api.L7Rules) []api.PortRule {
result := make([]api.PortRule, 0, len(l4))
for _, pr := range l4 {
result = append(result, api.PortRule{
Ports: pr.Ports,
Rules: l7,
})
}
return result
}
// policyDistillery is a convenience wrapper around the existing policy engine,
// allowing simple direct evaluation of L3 and L4 state into "MapState".
type policyDistillery struct {
*Repository
log io.Writer
}
func newPolicyDistillery(selectorCache *SelectorCache) *policyDistillery {
ret := &policyDistillery{
Repository: NewPolicyRepository(nil, nil, nil, nil, api.NewPolicyMetricsNoop()),
}
ret.selectorCache = selectorCache
return ret
}
func (d *policyDistillery) WithLogBuffer(w io.Writer) *policyDistillery {
return &policyDistillery{
Repository: d.Repository,
log: w,
}
}
// distillEndpointPolicy distills the policy repository into an EndpointPolicy
// Caller is responsible for Ready() & Detach() when done with the policy
func (d *policyDistillery) distillEndpointPolicy(owner PolicyOwner, identity *identity.Identity) (*EndpointPolicy, error) {
sp, _, err := d.Repository.GetSelectorPolicy(identity, 0, &dummyPolicyStats{})
if err != nil {
return nil, fmt.Errorf("failed to calculate policy: %w", err)
}
epp := sp.DistillPolicy(owner, testRedirects)
if epp == nil {
return nil, errors.New("policy distillation failure")
}
return epp, nil
}
// distillPolicy distills the policy repository into a set of bpf map state
// entries for an endpoint with the specified labels.
func (d *policyDistillery) distillPolicy(owner PolicyOwner, identity *identity.Identity) (mapState, error) {
epp, err := d.distillEndpointPolicy(owner, identity)
if err != nil {
return emptyMapState(), err
}
// Remove the allow-all egress entry that's generated by default. This is
// because this test suite doesn't have a notion of traffic direction, so
// the extra egress allow-all is technically correct, but omitted from the
// expected output that's asserted against for the sake of brevity.
if entry, ok := epp.policyMapState.get(mapKeyAllowAllE_); ok && !entry.IsDeny() {
epp.policyMapState.delete(mapKeyAllowAllE_)
}
epp.Ready()
epp.Detach()
return epp.policyMapState, nil
}
// Perm calls f with each permutation of a.
func Perm[X any](a []X, f func([]X)) {
perm(a, f, 0)
}
// Permute the values at index i to len(a)-1.
func perm[X any](a []X, f func([]X), i int) {
if i > len(a) {
f(a)
return
}
perm(a, f, i+1)
for j := i + 1; j < len(a); j++ {
a[i], a[j] = a[j], a[i]
perm(a, f, i+1)
a[i], a[j] = a[j], a[i]
}
}
func Test_Perm(t *testing.T) {
var res []string
expected := []string{
"abc",
"acb",
"bac",
"bca",
"cba",
"cab",
}
Perm([]rune("abc"), func(x []rune) { res = append(res, string(x)) })
assert.Equal(t, expected, res, "invalid permutations")
}
func testMapState(initMap mapStateMap) mapState {
return emptyMapState().withState(initMap)
}
func Test_MergeL3(t *testing.T) {
// Cache the policy enforcement value from when the test was run to avoid
// pollution across tests.
oldPolicyEnable := GetPolicyEnabled()
defer SetPolicyEnabled(oldPolicyEnable)
SetPolicyEnabled(option.DefaultEnforcement)
identityCache := identity.IdentityMap{
identityFoo: labelsFoo,
identityBar: labelsBar,
}
selectorCache := testNewSelectorCache(identityCache)
type authResult map[identity.NumericIdentity]AuthTypes
tests := []struct {
test int
rules api.Rules
result mapState
auths authResult
}{
{
0,
api.Rules{ruleL3__AllowFoo, ruleL3__AllowBar},
testMapState(mapStateMap{
mapKeyAllowFoo__: mapEntryL7None_(lblsL3__AllowFoo),
mapKeyAllowBar__: mapEntryL7None_(lblsL3__AllowBar),
}),
authResult{
identityBar: AuthTypes{},
identityFoo: AuthTypes{},
},
},
{
1,
api.Rules{ruleL3__AllowFoo, ruleL3L4__Allow},
testMapState(mapStateMap{
mapKeyAllowFoo__: mapEntryL7None_(lblsL3__AllowFoo),
mapKeyAllowFooL4: mapEntryL7None_(lblsL3L4__Allow),
}),
authResult{
identityBar: AuthTypes{},
identityFoo: AuthTypes{},
},
},
{
2,
api.Rules{ruleL3__AllowFoo, ruleL3__AllowBarAuth},
testMapState(mapStateMap{
mapKeyAllowFoo__: mapEntryL7None_(lblsL3__AllowFoo),
mapKeyAllowBar__: mapEntryL7ExplicitAuth_(AuthTypeAlwaysFail, lblsL3__AllowBar),
}),
authResult{
identityBar: AuthTypes{AuthTypeAlwaysFail: struct{}{}},
identityFoo: AuthTypes{},
},
},
{
3,
api.Rules{ruleL3__AllowFoo, ruleL3__AllowBarAuth, rule__L4__AllowAuth},
testMapState(mapStateMap{
mapKeyAllow___L4: mapEntryL7ExplicitAuth_(AuthTypeSpire, lbls__L4__Allow),
mapKeyAllowFoo__: mapEntryL7None_(lblsL3__AllowFoo),
mapKeyAllowBar__: mapEntryL7ExplicitAuth_(AuthTypeAlwaysFail, lblsL3__AllowBar),
}),
authResult{
identityBar: AuthTypes{AuthTypeAlwaysFail: struct{}{}, AuthTypeSpire: struct{}{}},
identityFoo: AuthTypes{AuthTypeSpire: struct{}{}},
},
},
{
4,
api.Rules{rule____AllowAll, ruleL3__AllowBarAuth},
testMapState(mapStateMap{
mapKeyAllowAll__: mapEntryL7None_(lbls____AllowAll),
mapKeyAllowBar__: mapEntryL7ExplicitAuth_(AuthTypeAlwaysFail, lblsL3__AllowBar),
}),
authResult{
identityBar: AuthTypes{AuthTypeAlwaysFail: struct{}{}},
identityFoo: AuthTypes{},
},
},
{
5,
api.Rules{rule____AllowAllAuth, ruleL3__AllowBar},
testMapState(mapStateMap{
mapKeyAllowAll__: mapEntryL7ExplicitAuth_(AuthTypeSpire, lbls____AllowAll),
mapKeyAllowBar__: mapEntryL7None_(lblsL3__AllowBar),
}),
authResult{
identityBar: AuthTypes{AuthTypeSpire: struct{}{}},
identityFoo: AuthTypes{AuthTypeSpire: struct{}{}},
},
},
{
6,
api.Rules{rule____AllowAllAuth, rule__L4__Allow},
testMapState(mapStateMap{
mapKeyAllowAll__: mapEntryL7ExplicitAuth_(AuthTypeSpire, lbls____AllowAll),
mapKeyAllow___L4: mapEntryL7DerivedAuth_(AuthTypeSpire, lbls__L4__Allow),
}),
authResult{
identityBar: AuthTypes{AuthTypeSpire: struct{}{}},
identityFoo: AuthTypes{AuthTypeSpire: struct{}{}},
},
},
{
7,
api.Rules{rule____AllowAllAuth, ruleL3__AllowBar, rule__L4__Allow},
testMapState(mapStateMap{
mapKeyAllowAll__: mapEntryL7ExplicitAuth_(AuthTypeSpire, lbls____AllowAll),
mapKeyAllow___L4: mapEntryL7DerivedAuth_(AuthTypeSpire, lbls__L4__Allow),
mapKeyAllowBar__: mapEntryL7DerivedAuth_(AuthTypeDisabled, lblsL3__AllowBar),
}),
authResult{
identityBar: AuthTypes{AuthTypeSpire: struct{}{}},
identityFoo: AuthTypes{AuthTypeSpire: struct{}{}},
},
},
{
8,
api.Rules{rule____AllowAll, ruleL3__AllowBar, rule__L4__Allow},
testMapState(mapStateMap{
mapKeyAllowAll__: mapEntryL7None_(lbls____AllowAll),
mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow),
mapKeyAllowBar__: mapEntryL7None_(lblsL3__AllowBar),
}),
authResult{
identityBar: AuthTypes{},
identityFoo: AuthTypes{},
},
},
{
9,
api.Rules{rule____AllowAll, rule__L4__Allow, ruleL3__AllowBarAuth},
testMapState(mapStateMap{
mapKeyAllowAll__: mapEntryL7None_(lbls____AllowAll),
mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow),
mapKeyAllowBar__: mapEntryL7ExplicitAuth_(AuthTypeAlwaysFail, lblsL3__AllowBar),
}),
authResult{
identityBar: AuthTypes{AuthTypeAlwaysFail: struct{}{}},
identityFoo: AuthTypes{},
},
},
{
10, // Same as 9, but the L3L4 entry is created by an explicit rule.
api.Rules{rule____AllowAll, rule__L4__Allow, ruleL3__AllowBarAuth, ruleL3L4AllowBarAuth},
testMapState(mapStateMap{
mapKeyAllowAll__: mapEntryL7None_(lbls____AllowAll),
mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow),
mapKeyAllowBar__: mapEntryL7ExplicitAuth_(AuthTypeAlwaysFail, lblsL3__AllowBar),
mapKeyAllowBarL4: mapEntryL7ExplicitAuth_(AuthTypeAlwaysFail, lblsL3L4AllowBar),
}),
authResult{
identityBar: AuthTypes{AuthTypeAlwaysFail: struct{}{}},
identityFoo: AuthTypes{},
},
},
}
identity := identity.NewIdentityFromLabelArray(identity.NumericIdentity(identityFoo), labelsFoo)
for _, tt := range tests {
for i, r := range tt.rules {
tt.rules[i] = r.WithEndpointSelector(selectFoo_)
}
round := 0
Perm(tt.rules, func(rules []*api.Rule) {
round++
repo := newPolicyDistillery(selectorCache)
_, _ = repo.MustAddList(rules)
t.Run(fmt.Sprintf("permutation_%d-%d", tt.test, round), func(t *testing.T) {
logBuffer := new(bytes.Buffer)
repo = repo.WithLogBuffer(logBuffer)
mapstate, err := repo.distillPolicy(DummyOwner{}, identity)
if err != nil {
t.Errorf("Policy resolution failure: %s", err)
}
if equal := assert.True(t, mapstate.Equal(&tt.result), mapstate.diff(&tt.result)); !equal {
t.Logf("Rules:\n%s\n\n", api.Rules(rules).String())
t.Logf("Policy Trace: \n%s\n", logBuffer.String())
t.Errorf("Policy obtained didn't match expected for endpoint %s:\nObtained: %v\nExpected: %v", labelsFoo, mapstate, tt.result)
}
for remoteID, expectedAuthTypes := range tt.auths {
authTypes := repo.GetAuthTypes(identity.ID, remoteID)
if !maps.Equal(authTypes, expectedAuthTypes) {
t.Errorf("Incorrect AuthTypes result for remote ID %d: obtained %v, expected %v", remoteID, authTypes, expectedAuthTypes)
}
}
})
})
}
}
// The following variable names are derived from the following Google Sheet:
// https://docs.google.com/spreadsheets/d/1WANIoZGB48nryylQjjOw6lKjI80eVgPShrdMTMalLEw/edit?usp=sharing
const (
L3L4KeyL3 = iota
L3L4KeyL4
L3L4KeyL7
L3L4KeyDeny
L4KeyL3
L4KeyL4
L4KeyL7
L4KeyDeny
L3KeyL3
L3KeyL4
L3KeyL7
L3KeyDeny
Total
)
// fieldsSet is the representation of the values set in the cells M8-P8, Q8-T8
// and U8-X8.
type fieldsSet struct {
L3 *bool
L4 *bool
L7 *bool
Deny *bool
}
// generatedBPFKey is the representation of the values set in the cells [M:P]6,
// [Q:T]6 and [U:X]6.
type generatedBPFKey struct {
L3L4Key fieldsSet
L4Key fieldsSet
L3Key fieldsSet
}
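// parseFieldBool maps a spreadsheet cell to a tri-state value: "X" means
// "don't care" (nil), "0" means false and "1" means true.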
func parseFieldBool(s string) *bool {
switch s {
case "X":
return nil
case "0":
return func() *bool { a := false; return &a }()
case "1":
return func() *bool { a := true; return &a }()
default:
panic("Unknown value")
}
}
func parseTable(test string) generatedBPFKey {
// Split the row on consecutive whitespace and return the fields that we want
// to parse.
fields := strings.Fields(test)
if len(fields) != Total {
panic("Wrong number of expected results")
}
return generatedBPFKey{
L3L4Key: fieldsSet{
L3: parseFieldBool(fields[L3L4KeyL3]),
L4: parseFieldBool(fields[L3L4KeyL4]),
L7: parseFieldBool(fields[L3L4KeyL7]),
Deny: parseFieldBool(fields[L3L4KeyDeny]),
},
L4Key: fieldsSet{
L3: parseFieldBool(fields[L4KeyL3]),
L4: parseFieldBool(fields[L4KeyL4]),
L7: parseFieldBool(fields[L4KeyL7]),
Deny: parseFieldBool(fields[L4KeyDeny]),
},
L3Key: fieldsSet{
L3: parseFieldBool(fields[L3KeyL3]),
L4: parseFieldBool(fields[L3KeyL4]),
L7: parseFieldBool(fields[L3KeyL7]),
Deny: parseFieldBool(fields[L3KeyDeny]),
},
}
}
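// For example (hand-worked from the column layout above), parseTable turns the
// row "1 1 0 0 X X X X 1 0 0 0" into a generatedBPFKey whose L3L4Key has L3 and
// L4 set to true with L7 and Deny false, whose L4Key is entirely unset (all
// nil), and whose L3Key has L3 true with L4, L7 and Deny all false.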
// testCaseToMapState generates the expected MapState for a test case. This
// function is an independent implementation of the expected behavior. Any
// relation between this function and non-unit-test code should be seen as
// coincidental. The algorithm represented here should be the source of truth
// for our expectations when enforcing multiple types of policies.
func testCaseToMapState(t generatedBPFKey) mapState {
m := emptyMapState()
if t.L3Key.L3 != nil {
if t.L3Key.Deny != nil && *t.L3Key.Deny {
m.upsert(mapKeyDeny_Foo__, mapEntryL7Deny())
} else {
// If L7 is not set, or it is explicitly set to false
if t.L3Key.L7 == nil || !*t.L3Key.L7 {
m.upsert(mapKeyAllowFoo__, mapEntryL7None_())
}
// There is no "else" branch because we don't support L3L7 policies, i.e.,
// an L4 port needs to be specified.
}
}
if t.L4Key.L3 != nil {
if t.L4Key.Deny != nil && *t.L4Key.Deny {
m.upsert(mapKeyDeny____L4, mapEntryL7Deny())
} else {
// If L7 is not set, or it is explicitly set to false
if t.L4Key.L7 == nil || !*t.L4Key.L7 {
m.upsert(mapKeyAllow___L4, mapEntryL7None_())
} else {
// L7 is set and true, so we expect a mapEntry with L7 redirection.
m.upsert(mapKeyAllow___L4, mapEntryL7Proxy())
}
}
}
if t.L3L4Key.L3 != nil {
if t.L3L4Key.Deny != nil && *t.L3L4Key.Deny {
m.upsert(mapKeyDeny_FooL4, mapEntryL7Deny())
} else {
// If L7 is not set, or it is explicitly set to false
if t.L3L4Key.L7 == nil || !*t.L3L4Key.L7 {
m.upsert(mapKeyAllowFooL4, mapEntryL7None_())
} else {
// L7 is set and true, so we expect a mapEntry with L7 redirection, but
// only if we haven't already set one for an existing L4-only entry.
if t.L4Key.L7 == nil || !*t.L4Key.L7 {
m.upsert(mapKeyAllowFooL4, mapEntryL7Proxy())
}
}
}
}
return m
}
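// generateMapStates builds the expected mapState for each of the generated
// test cases by running the raw spreadsheet rows below through parseTable and
// testCaseToMapState. Continuing the worked example above, row 5
// ("1 1 0 0 X X X X 1 0 0 0") produces two allow entries without L7
// redirection: mapKeyAllowFooL4 and mapKeyAllowFoo__.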
func generateMapStates() []mapState {
rawTestTable := []string{
"X X X X X X X X X X X X", // 0
"X X X X X X X X 1 0 0 0",
"X X X X 0 1 0 0 X X X X",
"X X X X 0 1 0 0 1 0 0 0",
"1 1 0 0 X X X X X X X X",
"1 1 0 0 X X X X 1 0 0 0", // 5
"X X X X 0 1 0 0 X X X X",
"X X X X 0 1 0 0 1 0 0 0",
"X X X X 0 1 1 0 X X X X",
"X X X X 0 1 1 0 1 0 0 0",
"X X X X 0 1 1 0 X X X X", // 10
"X X X X 0 1 1 0 1 0 0 0",
"1 1 1 0 0 1 1 0 X X X X",
"1 1 1 0 0 1 1 0 1 0 0 0",
"1 1 1 0 0 1 1 0 X X X X",
"1 1 1 0 0 1 1 0 1 0 0 0", // 15
"1 1 1 0 X X X X X X X X",
"1 1 1 0 X X X X 1 0 0 0",
"1 1 1 0 0 1 0 0 X X X X",
"1 1 1 0 0 1 0 0 1 0 0 0",
"1 1 1 0 X X X X X X X X", // 20
"1 1 1 0 X X X X 1 0 0 0",
"1 1 1 0 0 1 0 0 X X X X",
"1 1 1 0 0 1 0 0 1 0 0 0",
"1 1 1 0 0 1 1 0 X X X X",
"1 1 1 0 0 1 1 0 1 0 0 0", // 25
"1 1 1 0 0 1 1 0 X X X X",
"1 1 1 0 0 1 1 0 1 0 0 0",
"1 1 1 0 0 1 1 0 X X X X",
"1 1 1 0 0 1 1 0 1 0 0 0",
"1 1 1 0 0 1 1 0 X X X X", // 30
"1 1 1 0 0 1 1 0 1 0 0 0",
"X X X X X X X X 1 0 0 1", // 32
"X X X X X X X X 1 0 0 1",
"1 1 0 1 0 1 0 0 1 0 0 1",
"1 1 0 1 0 1 0 0 1 0 0 1",
"X X X X X X X X 1 0 0 1",
"X X X X X X X X 1 0 0 1",
"1 1 0 1 0 1 0 0 1 0 0 1",
"1 1 0 1 0 1 0 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"X X X X X X X X 1 0 0 1",
"X X X X X X X X 1 0 0 1",
"1 1 0 1 0 1 0 0 1 0 0 1",
"1 1 0 1 0 1 0 0 1 0 0 1",
"X X X X X X X X 1 0 0 1",
"X X X X X X X X 1 0 0 1",
"1 1 0 1 0 1 0 0 1 0 0 1",
"1 1 0 1 0 1 0 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"X X X X 0 1 0 1 X X X X", // 64
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 1 0 0 1", // 96
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"X X X X 0 1 0 1 1 0 0 1",
"1 1 0 1 X X X X X X X X", // 128
"1 1 0 1 X X X X 1 0 0 0",
"1 1 0 1 0 1 0 0 X X X X",
"1 1 0 1 0 1 0 0 1 0 0 0",
"1 1 0 1 X X X X X X X X",
"1 1 0 1 X X X X 1 0 0 0",
"1 1 0 1 0 1 0 0 X X X X",
"1 1 0 1 0 1 0 0 1 0 0 0",
"1 1 0 1 0 1 1 0 X X X X",
"1 1 0 1 0 1 1 0 1 0 0 0",
"1 1 0 1 0 1 1 0 X X X X",
"1 1 0 1 0 1 1 0 1 0 0 0",
"1 1 0 1 0 1 1 0 X X X X",
"1 1 0 1 0 1 1 0 1 0 0 0",
"1 1 0 1 0 1 1 0 X X X X",
"1 1 0 1 0 1 1 0 1 0 0 0",
"1 1 0 1 X X X X X X X X",
"1 1 0 1 X X X X 1 0 0 0",
"1 1 0 1 0 1 0 0 X X X X",
"1 1 0 1 0 1 0 0 1 0 0 0",
"1 1 0 1 X X X X X X X X",
"1 1 0 1 X X X X 1 0 0 0",
"1 1 0 1 0 1 0 0 X X X X",
"1 1 0 1 0 1 0 0 1 0 0 0",
"1 1 0 1 0 1 1 0 X X X X",
"1 1 0 1 0 1 1 0 1 0 0 0",
"1 1 0 1 0 1 1 0 X X X X",
"1 1 0 1 0 1 1 0 1 0 0 0",
"1 1 0 1 0 1 1 0 X X X X",
"1 1 0 1 0 1 1 0 1 0 0 0",
"1 1 0 1 0 1 1 0 X X X X",
"1 1 0 1 0 1 1 0 1 0 0 0",
"X X X X X X X X 1 0 0 1", // 160
"X X X X X X X X 1 0 0 1",
"1 1 0 1 0 1 0 0 1 0 0 1",
"1 1 0 1 0 1 0 0 1 0 0 1",
"X X X X X X X X 1 0 0 1",
"X X X X X X X X 1 0 0 1",
"1 1 0 1 0 1 0 0 1 0 0 1",
"1 1 0 1 0 1 0 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"X X X X X X X X 1 0 0 1",
"X X X X X X X X 1 0 0 1",
"1 1 0 1 0 1 0 0 1 0 0 1",
"1 1 0 1 0 1 0 0 1 0 0 1",
"X X X X X X X X 1 0 0 1",
"X X X X X X X X 1 0 0 1",
"1 1 0 1 0 1 0 0 1 0 0 1",
"1 1 0 1 0 1 0 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"1 1 0 1 0 1 1 0 1 0 0 1",
"X X X X 0 1 0 1 X X X X", // 192
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 0 1 0 1 X X X X",
"X X X X 0 1 0 1 1 0 0 0",
"X X X X 1 1 0 1 1 0 0 1", // 224
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
"X X X X 1 1 0 1 1 0 0 1",
}
mapStates := make([]mapState, 0, len(rawTestTable))
for _, rawTest := range rawTestTable {
testCase := parseTable(rawTest)
mapState := testCaseToMapState(testCase)
mapStates = append(mapStates, mapState)
}
return mapStates
}
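// generateRule treats the test case number as a bit mask over rulesIdx: a set
// bit i selects rulesIdx[i], while a cleared bit contributes the matching no-op
// rule (rule_____NoDeny for the deny slots, i >= 5, and rule____NoAllow for the
// allow slots), emitted in most-significant-bit-first order. For example
// (hand-computed), generateRule(33) has bits 0 and 5 set and therefore yields
// ruleL3_____Deny and ruleL3____Allow together with six no-op rules.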
func generateRule(testCase int) api.Rules {
rulesIdx := api.Rules{
ruleL3____Allow,
rule__L4__Allow,
ruleL3L4__Allow,
rule__L4L7Allow,
ruleL3L4L7Allow,
// denyIdx
ruleL3_____Deny,
rule__L4___Deny,
ruleL3L4___Deny,
}
rules := make(api.Rules, 0, len(rulesIdx))
for i := len(rulesIdx) - 1; i >= 0; i-- {
if ((testCase >> i) & 0x1) != 0 {
rules = append(rules, rulesIdx[i])
} else {
if i >= 5 { // denyIdx
rules = append(rules, rule_____NoDeny)
} else {
rules = append(rules, rule____NoAllow)
}
}
}
return rules
}
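// Test_MergeRules checks the map state produced by every combination of the
// eight allow/deny rules: the first 32 cases are compared against both the
// hand-written table below and the generated expectations, while the remaining
// cases are compared against the generated expectations only.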
func Test_MergeRules(t *testing.T) {
// Cache the policy enforcement setting so it can be restored after the test,
// avoiding pollution across tests.
oldPolicyEnable := GetPolicyEnabled()
defer SetPolicyEnabled(oldPolicyEnable)
SetPolicyEnabled(option.DefaultEnforcement)
identityCache := identity.IdentityMap{
identity.NumericIdentity(identityFoo): labelsFoo,
}
selectorCache := testNewSelectorCache(identityCache)
identity := identity.NewIdentityFromLabelArray(identity.NumericIdentity(identityFoo), labelsFoo)
tests := []struct {
test int
rules api.Rules
expected mapState
}{
// The following table is derived from the Google Sheet here:
// https://docs.google.com/spreadsheets/d/1WANIoZGB48nryylQjjOw6lKjI80eVgPShrdMTMalLEw/edit?usp=sharing
//
// Rule 0 | Rule 1 | Rule 2 | Rule 3 | Rule 4 | Rule 5 | Rule 6 | Rule 7 | Desired BPF map state
{0, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule____NoAllow}, testMapState(nil)},
{1, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule____NoAllow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
{2, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule__L4__Allow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow)})},
{3, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule__L4__Allow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
{4, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, ruleL3L4__Allow, rule____NoAllow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllowFooL4: mapEntryL7None_(lblsL3L4__Allow)})},
{5, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, ruleL3L4__Allow, rule____NoAllow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllowFooL4: mapEntryL7None_(lblsL3L4__Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
{6, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, ruleL3L4__Allow, rule__L4__Allow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow)})}, // identical L3L4 entry suppressed
{7, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, ruleL3L4__Allow, rule__L4__Allow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // identical L3L4 entry suppressed
{8, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__L4L7Allow, rule____NoAllow, rule____NoAllow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow)})},
{9, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__L4L7Allow, rule____NoAllow, rule____NoAllow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
{10, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__L4L7Allow, rule____NoAllow, rule__L4__Allow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow)})},
{11, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__L4L7Allow, rule____NoAllow, rule__L4__Allow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
{12, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__L4L7Allow, ruleL3L4__Allow, rule____NoAllow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow)})}, // L3L4 entry suppressed to allow L4-only entry to redirect
{13, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__L4L7Allow, ruleL3L4__Allow, rule____NoAllow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // L3L4 entry suppressed to allow L4-only entry to redirect
{14, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__L4L7Allow, ruleL3L4__Allow, rule__L4__Allow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow)})}, // L3L4 entry suppressed to allow L4-only entry to redirect
{15, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__L4L7Allow, ruleL3L4__Allow, rule__L4__Allow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // L3L4 entry suppressed to allow L4-only entry to redirect
{16, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4L7Allow)})},
{17, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule____NoAllow, rule____NoAllow, rule____NoAllow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
{18, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule____NoAllow, rule____NoAllow, rule__L4__Allow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4L7Allow), mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow)})},
{19, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule____NoAllow, rule____NoAllow, rule__L4__Allow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4L7Allow), mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
{20, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule____NoAllow, ruleL3L4__Allow, rule____NoAllow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4__Allow, lblsL3L4L7Allow)})},
{21, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule____NoAllow, ruleL3L4__Allow, rule____NoAllow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4__Allow, lblsL3L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
{22, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule____NoAllow, ruleL3L4__Allow, rule__L4__Allow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4__Allow, lblsL3L4L7Allow), mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow)})},
{23, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule____NoAllow, ruleL3L4__Allow, rule__L4__Allow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4__Allow, lblsL3L4L7Allow), mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
{24, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule__L4L7Allow, rule____NoAllow, rule____NoAllow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow)})}, // identical L3L4 entry suppressed
{25, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule__L4L7Allow, rule____NoAllow, rule____NoAllow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // identical L3L4 entry suppressed
{26, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule__L4L7Allow, rule____NoAllow, rule__L4__Allow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow)})}, // identical L3L4 entry suppressed
{27, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule__L4L7Allow, rule____NoAllow, rule__L4__Allow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // identical L3L4 entry suppressed
{28, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule__L4L7Allow, ruleL3L4__Allow, rule____NoAllow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow)})}, // identical L3L4 entry suppressed
{29, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule__L4L7Allow, ruleL3L4__Allow, rule____NoAllow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // identical L3L4 entry suppressed
{30, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule__L4L7Allow, ruleL3L4__Allow, rule__L4__Allow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow)})}, // identical L3L4 entry suppressed
{31, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3L4L7Allow, rule__L4L7Allow, ruleL3L4__Allow, rule__L4__Allow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // identical L3L4 entry suppressed
}
expectedMapState := generateMapStates()
// Add the auto-generated test cases for the deny policies.
generatedIdx := 32
for i := generatedIdx; i < 256; i++ {
tests = append(tests,
struct {
test int
rules api.Rules
expected mapState
}{
test: i,
rules: generateRule(i),
expected: expectedMapState[i],
})
}
for i, tt := range tests {
repo := newPolicyDistillery(selectorCache)
generatedRule := generateRule(tt.test)
for _, r := range tt.rules {
if r != nil {
rule := r.WithEndpointSelector(selectFoo_)
_, _ = repo.MustAddList(api.Rules{rule})
}
}
t.Run(fmt.Sprintf("permutation_%d", tt.test), func(t *testing.T) {
logBuffer := new(bytes.Buffer)
repo = repo.WithLogBuffer(logBuffer)
mapstate, err := repo.distillPolicy(DummyOwner{}, identity)
if err != nil {
t.Errorf("Policy resolution failure: %s", err)
}
// Ignore the generated rules here, as they lack the LabelArrayList, which
// would make the tests fail.
if i < generatedIdx {
if equal := assert.True(t, mapstate.Equal(&tt.expected), mapstate.diff(&tt.expected)); !equal {
require.EqualExportedValuesf(t, tt.expected, mapstate, "Policy obtained didn't match expected for endpoint %s", labelsFoo)
t.Logf("Rules:\n%s\n\n", tt.rules.String())
t.Logf("Policy Trace: \n%s\n", logBuffer.String())
t.Errorf("Policy obtained didn't match expected for endpoint %s", labelsFoo)
}
}
if equal := assert.EqualExportedValues(t, expectedMapState[tt.test], mapstate); !equal {
t.Logf("Rules:\n%s\n\n", tt.rules.String())
t.Logf("Policy Trace: \n%s\n", logBuffer.String())
t.Error("Policy obtained didn't match expected for endpoint")
}
if equal := assert.ElementsMatch(t, tt.rules, generatedRule); !equal {
t.Logf("Rules:\n%s\n\n", tt.rules.String())
t.Logf("Policy Trace: \n%s\n", logBuffer.String())
t.Error("Generated rules didn't match manual rules")
}
})
}
}
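// Test_MergeRulesWithNamedPorts mirrors the first 32 cases of Test_MergeRules,
// but uses the named-port variants of the L4 rules.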
func Test_MergeRulesWithNamedPorts(t *testing.T) {
// Cache the policy enforcement setting so it can be restored after the test,
// avoiding pollution across tests.
oldPolicyEnable := GetPolicyEnabled()
defer SetPolicyEnabled(oldPolicyEnable)
SetPolicyEnabled(option.DefaultEnforcement)
identityCache := identity.IdentityMap{
identity.NumericIdentity(identityFoo): labelsFoo,
}
selectorCache := testNewSelectorCache(identityCache)
identity := identity.NewIdentityFromLabelArray(identity.NumericIdentity(identityFoo), labelsFoo)
tests := []struct {
test int
rules api.Rules
expected mapState
}{
// The following table is derived from the Google Sheet here:
// https://docs.google.com/spreadsheets/d/1WANIoZGB48nryylQjjOw6lKjI80eVgPShrdMTMalLEw/edit?usp=sharing
//
// Rule 0 | Rule 1 | Rule 2 | Rule 3 | Rule 4 | Rule 5 | Rule 6 | Rule 7 | Desired BPF map state
{0, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule____NoAllow}, testMapState(nil)},
{1, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule____NoAllow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
{2, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule__npL4__Allow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow)})},
{3, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule__npL4__Allow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
{4, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, ruleL3npL4__Allow, rule____NoAllow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllowFooL4: mapEntryL7None_(lblsL3L4__Allow)})},
{5, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, ruleL3npL4__Allow, rule____NoAllow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllowFooL4: mapEntryL7None_(lblsL3L4__Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
{6, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, ruleL3npL4__Allow, rule__npL4__Allow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow)})}, // identical L3L4 entry suppressed
{7, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule____NoAllow, ruleL3npL4__Allow, rule__npL4__Allow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // identical L3L4 entry suppressed
{8, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__npL4L7Allow, rule____NoAllow, rule____NoAllow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow)})},
{9, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__npL4L7Allow, rule____NoAllow, rule____NoAllow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
{10, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__npL4L7Allow, rule____NoAllow, rule__npL4__Allow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow)})},
{11, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__npL4L7Allow, rule____NoAllow, rule__npL4__Allow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
{12, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__npL4L7Allow, ruleL3npL4__Allow, rule____NoAllow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow)})}, // L3L4 entry suppressed to allow L4-only entry to redirect
{13, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__npL4L7Allow, ruleL3npL4__Allow, rule____NoAllow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // L3L4 entry suppressed to allow L4-only entry to redirect
{14, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__npL4L7Allow, ruleL3npL4__Allow, rule__npL4__Allow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow)})}, // L3L4 entry suppressed to allow L4-only entry to redirect
{15, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, rule____NoAllow, rule__npL4L7Allow, ruleL3npL4__Allow, rule__npL4__Allow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // L3L4 entry suppressed to allow L4-only entry to redirect
{16, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule____NoAllow, rule____NoAllow, rule____NoAllow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4L7Allow)})},
{17, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule____NoAllow, rule____NoAllow, rule____NoAllow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
{18, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule____NoAllow, rule____NoAllow, rule__npL4__Allow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4L7Allow), mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow)})},
{19, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule____NoAllow, rule____NoAllow, rule__npL4__Allow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4L7Allow), mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
{20, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule____NoAllow, ruleL3npL4__Allow, rule____NoAllow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4__Allow, lblsL3L4L7Allow)})},
{21, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule____NoAllow, ruleL3npL4__Allow, rule____NoAllow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4__Allow, lblsL3L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
{22, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule____NoAllow, ruleL3npL4__Allow, rule__npL4__Allow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4__Allow, lblsL3L4L7Allow), mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow)})},
{23, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule____NoAllow, ruleL3npL4__Allow, rule__npL4__Allow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllowFooL4: mapEntryL7Proxy(lblsL3L4__Allow, lblsL3L4L7Allow), mapKeyAllow___L4: mapEntryL7None_(lbls__L4__Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})},
{24, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule__npL4L7Allow, rule____NoAllow, rule____NoAllow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow)})}, // identical L3L4 entry suppressed
{25, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule__npL4L7Allow, rule____NoAllow, rule____NoAllow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // identical L3L4 entry suppressed
{26, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule__npL4L7Allow, rule____NoAllow, rule__npL4__Allow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow)})}, // identical L3L4 entry suppressed
{27, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule__npL4L7Allow, rule____NoAllow, rule__npL4__Allow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // identical L3L4 entry suppressed
{28, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule__npL4L7Allow, ruleL3npL4__Allow, rule____NoAllow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow)})}, // identical L3L4 entry suppressed
{29, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule__npL4L7Allow, ruleL3npL4__Allow, rule____NoAllow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // identical L3L4 entry suppressed
{30, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule__npL4L7Allow, ruleL3npL4__Allow, rule__npL4__Allow, rule____NoAllow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow)})}, // identical L3L4 entry suppressed
{31, api.Rules{rule_____NoDeny, rule_____NoDeny, rule_____NoDeny, ruleL3npL4L7Allow, rule__npL4L7Allow, ruleL3npL4__Allow, rule__npL4__Allow, ruleL3____Allow}, testMapState(mapStateMap{mapKeyAllow___L4: mapEntryL7Proxy(lbls__L4__Allow, lbls__L4L7Allow), mapKeyAllowFoo__: mapEntryL7None_(lblsL3____Allow)})}, // identical L3L4 entry suppressed
}
for _, tt := range tests {
repo := newPolicyDistillery(selectorCache)
for _, r := range tt.rules {
if r != nil {
rule := r.WithEndpointSelector(selectFoo_)
_, _ = repo.MustAddList(api.Rules{rule})
}
}
t.Run(fmt.Sprintf("permutation_%d", tt.test), func(t *testing.T) {
logBuffer := new(bytes.Buffer)
repo = repo.WithLogBuffer(logBuffer)
mapstate, err := repo.distillPolicy(DummyOwner{}, identity)
if err != nil {
t.Errorf("Policy resolution failure: %s", err)
}
require.Truef(t, mapstate.Equal(&tt.expected),
"Policy obtained didn't match expected for endpoint %s:\n%s", labelsFoo, mapstate.diff(&tt.expected))
})
}
}
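// Test_AllowAll verifies that an allow-all rule produces the expected
// allow-all ingress entry, both when attached to api.EndpointSelectorNone and
// when attached to the wildcard endpoint selector.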
func Test_AllowAll(t *testing.T) {
// Cache the policy enforcement setting so it can be restored after the test,
// avoiding pollution across tests.
oldPolicyEnable := GetPolicyEnabled()
defer SetPolicyEnabled(oldPolicyEnable)
SetPolicyEnabled(option.DefaultEnforcement)
identityCache := identity.IdentityMap{
identityFoo: labelsFoo,
identityBar: labelsBar,
}
selectorCache := testNewSelectorCache(identityCache)
identity := identity.NewIdentityFromLabelArray(identity.NumericIdentity(identityFoo), labelsFoo)
tests := []struct {
test int
selector api.EndpointSelector
rules api.Rules
expected mapState
}{
{0, api.EndpointSelectorNone, api.Rules{rule____AllowAll}, testMapState(mapStateMap{mapKeyAllowAll__: mapEntryL7None_(lblsAllowAllIngress)})},
{1, api.WildcardEndpointSelector, api.Rules{rule____AllowAll}, testMapState(mapStateMap{mapKeyAllowAll__: mapEntryL7None_(lbls____AllowAll)})},
}
for _, tt := range tests {
repo := newPolicyDistillery(selectorCache)
for _, r := range tt.rules {
if r != nil {
rule := r.WithEndpointSelector(tt.selector)
_, _ = repo.MustAddList(api.Rules{rule})
}
}
t.Run(fmt.Sprintf("permutation_%d", tt.test), func(t *testing.T) {
logBuffer := new(bytes.Buffer)
repo = repo.WithLogBuffer(logBuffer)
mapstate, err := repo.distillPolicy(DummyOwner{}, identity)
if err != nil {
t.Errorf("Policy resolution failure: %s", err)
}
if equal := assert.True(t, mapstate.Equal(&tt.expected), mapstate.diff(&tt.expected)); !equal {
t.Logf("Rules:\n%s\n\n", tt.rules.String())
t.Logf("Policy Trace: \n%s\n", logBuffer.String())
t.Errorf("Policy obtained didn't match expected for endpoint %s", labelsFoo)
}
})
}
}
var (
ruleAllowAllIngress = api.NewRule().WithIngressRules([]api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
}}}).WithEndpointSelector(api.WildcardEndpointSelector)
ruleL3DenyWorld = api.NewRule().WithIngressDenyRules([]api.IngressDenyRule{{
IngressCommonRule: api.IngressCommonRule{
FromEntities: api.EntitySlice{api.EntityWorld},
},
}}).WithEgressDenyRules([]api.EgressDenyRule{{
EgressCommonRule: api.EgressCommonRule{
ToEntities: api.EntitySlice{api.EntityWorld},
},
}}).WithEndpointSelector(api.WildcardEndpointSelector)
cpyRule = *ruleL3DenyWorld
ruleL3DenyWorldWithLabels = (&cpyRule).WithLabels(labels.LabelWorld.LabelArray())
worldReservedID = identity.ReservedIdentityWorld
worldReservedIDIPv4 = identity.ReservedIdentityWorldIPv4
worldReservedIDIPv6 = identity.ReservedIdentityWorldIPv6
mapKeyL3WorldIngress = IngressKey().WithIdentity(worldReservedID)
mapKeyL3WorldIngressIPv4 = IngressKey().WithIdentity(worldReservedIDIPv4)
mapKeyL3WorldIngressIPv6 = IngressKey().WithIdentity(worldReservedIDIPv6)
mapKeyL3WorldEgress = EgressKey().WithIdentity(worldReservedID)
mapKeyL3WorldEgressIPv4 = EgressKey().WithIdentity(worldReservedIDIPv4)
mapKeyL3WorldEgressIPv6 = EgressKey().WithIdentity(worldReservedIDIPv6)
AllowEntry = types.AllowEntry()
DenyEntry = types.DenyEntry()
mapEntryDeny = NewMapStateEntry(DenyEntry).withLabels(labels.LabelArrayList{nil})
mapEntryAllow = NewMapStateEntry(AllowEntry).withLabels(labels.LabelArrayList{nil})
worldLabelArrayList = labels.LabelArrayList{labels.LabelWorld.LabelArray()}
mapEntryWorldDenyWithLabels = NewMapStateEntry(DenyEntry).withLabels(worldLabelArrayList)
worldIPIdentity = localIdentity(16324)
worldIPCIDR = api.CIDR("192.0.2.3/32")
lblWorldIP = labels.GetCIDRLabels(netip.MustParsePrefix(string(worldIPCIDR)))
hostIPv4 = api.CIDR("172.19.0.1/32")
hostIPv6 = api.CIDR("fc00:c111::3/64")
lblHostIPv4CIDR = labels.GetCIDRLabels(netip.MustParsePrefix(string(hostIPv4)))
lblHostIPv6CIDR = labels.GetCIDRLabels(netip.MustParsePrefix(string(hostIPv6)))
ruleL3AllowWorldIP = api.NewRule().WithIngressRules([]api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromCIDR: api.CIDRSlice{worldIPCIDR},
},
}}).WithEgressRules([]api.EgressRule{{
EgressCommonRule: api.EgressCommonRule{
ToCIDR: api.CIDRSlice{worldIPCIDR},
},
}}).WithEndpointSelector(api.WildcardEndpointSelector)
worldSubnetIdentity = localIdentity(16325)
worldSubnet = api.CIDR("192.0.2.0/24")
worldSubnetRule = api.CIDRRule{
Cidr: worldSubnet,
}
lblWorldSubnet = labels.GetCIDRLabels(netip.MustParsePrefix(string(worldSubnet)))
ruleL3DenySubnet = api.NewRule().WithIngressDenyRules([]api.IngressDenyRule{{
IngressCommonRule: api.IngressCommonRule{
FromCIDRSet: api.CIDRRuleSlice{worldSubnetRule},
},
}}).WithEgressDenyRules([]api.EgressDenyRule{{
EgressCommonRule: api.EgressCommonRule{
ToCIDRSet: api.CIDRRuleSlice{worldSubnetRule},
},
}}).WithEndpointSelector(api.WildcardEndpointSelector)
mapKeyL3SubnetIngress = IngressKey().WithIdentity(worldSubnetIdentity)
mapKeyL3SubnetEgress = EgressKey().WithIdentity(worldSubnetIdentity)
ruleL3DenySmallerSubnet = api.NewRule().WithIngressDenyRules([]api.IngressDenyRule{{
IngressCommonRule: api.IngressCommonRule{
FromCIDRSet: api.CIDRRuleSlice{api.CIDRRule{Cidr: worldIPCIDR}},
},
}}).WithEgressDenyRules([]api.EgressDenyRule{{
EgressCommonRule: api.EgressCommonRule{
ToCIDRSet: api.CIDRRuleSlice{api.CIDRRule{Cidr: worldIPCIDR}},
},
}}).WithEndpointSelector(api.WildcardEndpointSelector)
ruleL3AllowLargerSubnet = api.NewRule().WithIngressRules([]api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromCIDRSet: api.CIDRRuleSlice{api.CIDRRule{Cidr: worldSubnet}},
},
}}).WithEgressRules([]api.EgressRule{{
EgressCommonRule: api.EgressCommonRule{
ToCIDRSet: api.CIDRRuleSlice{api.CIDRRule{Cidr: worldSubnet}},
},
}}).WithEndpointSelector(api.WildcardEndpointSelector)
mapKeyL3SmallerSubnetIngress = IngressKey().WithIdentity(worldIPIdentity)
mapKeyL3SmallerSubnetEgress = EgressKey().WithIdentity(worldIPIdentity)
ruleL3AllowHostEgress = api.NewRule().WithEgressRules([]api.EgressRule{{
EgressCommonRule: api.EgressCommonRule{
ToCIDRSet: api.CIDRRuleSlice{api.CIDRRule{Cidr: hostIPv4}, api.CIDRRule{Cidr: hostIPv6}},
},
}}).WithEndpointSelector(api.WildcardEndpointSelector)
mapKeyL3UnknownIngress = IngressKey()
mapEntryL3UnknownIngress = newAllowEntryWithLabels(LabelsAllowAnyIngress)
mapKeyL3HostEgress = EgressKey().WithIdentity(identity.ReservedIdentityHost)
ruleL3L4Port8080ProtoAnyDenyWorld = api.NewRule().WithIngressDenyRules([]api.IngressDenyRule{
{
ToPorts: api.PortDenyRules{
api.PortDenyRule{
Ports: []api.PortProtocol{
{
Port: "8080",
Protocol: api.ProtoAny,
},
},
},
},
IngressCommonRule: api.IngressCommonRule{
FromEntities: api.EntitySlice{api.EntityWorld},
},
},
}).WithEgressDenyRules([]api.EgressDenyRule{
{
ToPorts: api.PortDenyRules{
api.PortDenyRule{
Ports: []api.PortProtocol{
{
Port: "8080",
Protocol: api.ProtoAny,
},
},
},
},
EgressCommonRule: api.EgressCommonRule{
ToEntities: api.EntitySlice{api.EntityWorld},
},
},
}).WithEndpointSelector(api.WildcardEndpointSelector)
mapKeyL3L4Port8080ProtoTCPWorldIngress = IngressKey().WithIdentity(worldReservedID).WithTCPPort(8080)
mapKeyL3L4Port8080ProtoTCPWorldEgress = EgressKey().WithIdentity(worldReservedID).WithTCPPort(8080)
mapKeyL3L4Port8080ProtoUDPWorldIngress = IngressKey().WithIdentity(worldReservedID).WithUDPPort(8080)
mapKeyL3L4Port8080ProtoUDPWorldEgress = EgressKey().WithIdentity(worldReservedID).WithUDPPort(8080)
mapKeyL3L4Port8080ProtoSCTPWorldIngress = IngressKey().WithIdentity(worldReservedID).WithSCTPPort(8080)
mapKeyL3L4Port8080ProtoSCTPWorldEgress = EgressKey().WithIdentity(worldReservedID).WithSCTPPort(8080)
mapKeyL3L4Port8080ProtoTCPWorldIPv4Ingress = IngressKey().WithIdentity(worldReservedIDIPv4).WithTCPPort(8080)
mapKeyL3L4Port8080ProtoTCPWorldIPv4Egress = EgressKey().WithIdentity(worldReservedIDIPv4).WithTCPPort(8080)
mapKeyL3L4Port8080ProtoUDPWorldIPv4Ingress = IngressKey().WithIdentity(worldReservedIDIPv4).WithUDPPort(8080)
mapKeyL3L4Port8080ProtoUDPWorldIPv4Egress = EgressKey().WithIdentity(worldReservedIDIPv4).WithUDPPort(8080)
mapKeyL3L4Port8080ProtoSCTPWorldIPv4Ingress = IngressKey().WithIdentity(worldReservedIDIPv4).WithSCTPPort(8080)
mapKeyL3L4Port8080ProtoSCTPWorldIPv4Egress = EgressKey().WithIdentity(worldReservedIDIPv4).WithSCTPPort(8080)
mapKeyL3L4Port8080ProtoTCPWorldIPv6Ingress = IngressKey().WithIdentity(worldReservedIDIPv6).WithTCPPort(8080)
mapKeyL3L4Port8080ProtoTCPWorldIPv6Egress = EgressKey().WithIdentity(worldReservedIDIPv6).WithTCPPort(8080)
mapKeyL3L4Port8080ProtoUDPWorldIPv6Ingress = IngressKey().WithIdentity(worldReservedIDIPv6).WithUDPPort(8080)
mapKeyL3L4Port8080ProtoUDPWorldIPv6Egress = EgressKey().WithIdentity(worldReservedIDIPv6).WithUDPPort(8080)
mapKeyL3L4Port8080ProtoSCTPWorldIPv6Ingress = IngressKey().WithIdentity(worldReservedIDIPv6).WithSCTPPort(8080)
mapKeyL3L4Port8080ProtoSCTPWorldIPv6Egress = EgressKey().WithIdentity(worldReservedIDIPv6).WithSCTPPort(8080)
mapKeyL3L4Port8080ProtoTCPWorldSNIngress = IngressKey().WithIdentity(worldSubnetIdentity).WithTCPPort(8080)
mapKeyL3L4Port8080ProtoTCPWorldSNEgress = EgressKey().WithIdentity(worldSubnetIdentity).WithTCPPort(8080)
mapKeyL3L4Port8080ProtoUDPWorldSNIngress = IngressKey().WithIdentity(worldSubnetIdentity).WithUDPPort(8080)
mapKeyL3L4Port8080ProtoUDPWorldSNEgress = EgressKey().WithIdentity(worldSubnetIdentity).WithUDPPort(8080)
mapKeyL3L4Port8080ProtoSCTPWorldSNIngress = IngressKey().WithIdentity(worldSubnetIdentity).WithSCTPPort(8080)
mapKeyL3L4Port8080ProtoSCTPWorldSNEgress = EgressKey().WithIdentity(worldSubnetIdentity).WithSCTPPort(8080)
mapKeyL3L4Port8080ProtoTCPWorldIPIngress = IngressKey().WithIdentity(worldIPIdentity).WithTCPPort(8080)
mapKeyL3L4Port8080ProtoTCPWorldIPEgress = EgressKey().WithIdentity(worldIPIdentity).WithTCPPort(8080)
mapKeyL3L4Port8080ProtoUDPWorldIPIngress = IngressKey().WithIdentity(worldIPIdentity).WithUDPPort(8080)
mapKeyL3L4Port8080ProtoUDPWorldIPEgress = EgressKey().WithIdentity(worldIPIdentity).WithUDPPort(8080)
mapKeyL3L4Port8080ProtoSCTPWorldIPIngress = IngressKey().WithIdentity(worldIPIdentity).WithSCTPPort(8080)
mapKeyL3L4Port8080ProtoSCTPWorldIPEgress = EgressKey().WithIdentity(worldIPIdentity).WithSCTPPort(8080)
ruleL3AllowWorldSubnet = api.NewRule().WithIngressRules([]api.IngressRule{{
ToPorts: api.PortRules{
api.PortRule{
Ports: []api.PortProtocol{
{
Port: "8080",
Protocol: api.ProtoAny,
},
},
},
},
IngressCommonRule: api.IngressCommonRule{
FromCIDR: api.CIDRSlice{worldSubnet},
},
}}).WithEgressRules([]api.EgressRule{{
ToPorts: api.PortRules{
api.PortRule{
Ports: []api.PortProtocol{
{
Port: "8080",
Protocol: api.ProtoAny,
},
},
},
},
EgressCommonRule: api.EgressCommonRule{
ToCIDR: api.CIDRSlice{worldSubnet},
},
}}).WithEndpointSelector(api.WildcardEndpointSelector)
ruleL3DenyWorldIP = api.NewRule().WithIngressDenyRules([]api.IngressDenyRule{{
IngressCommonRule: api.IngressCommonRule{
FromCIDR: api.CIDRSlice{worldIPCIDR},
},
}}).WithEgressDenyRules([]api.EgressDenyRule{{
EgressCommonRule: api.EgressCommonRule{
ToCIDR: api.CIDRSlice{worldIPCIDR},
},
}}).WithEndpointSelector(api.WildcardEndpointSelector)
mapKeyAnyIngress = IngressKey()
mapKeyL4AnyPortProtoWorldIPIngress = IngressKey().WithIdentity(worldIPIdentity)
mapKeyL4AnyPortProtoWorldIPEgress = EgressKey().WithIdentity(worldIPIdentity)
ruleL3AllowWorldSubnetNamedPort = api.NewRule().WithIngressRules([]api.IngressRule{{
ToPorts: api.PortRules{
api.PortRule{
Ports: []api.PortProtocol{
{
Port: "http",
Protocol: api.ProtoTCP,
},
},
},
},
IngressCommonRule: api.IngressCommonRule{
FromCIDR: api.CIDRSlice{worldSubnet},
},
}}).WithEndpointSelector(api.WildcardEndpointSelector)
mapKeyL3L4NamedPortHTTPProtoTCPWorldSubNetIngress = IngressKey().WithIdentity(worldSubnetIdentity).WithTCPPort(80)
mapKeyL3L4NamedPortHTTPProtoTCPWorldIPIngress = IngressKey().WithIdentity(worldIPIdentity).WithTCPPort(80)
ruleL3AllowWorldSubnetPortRange = api.NewRule().WithIngressRules([]api.IngressRule{{
ToPorts: api.PortRules{
api.PortRule{
Ports: []api.PortProtocol{
{
Port: "64",
EndPort: 127,
Protocol: api.ProtoTCP,
},
{
Port: "5",
EndPort: 10,
Protocol: api.ProtoTCP,
},
},
},
},
IngressCommonRule: api.IngressCommonRule{
FromCIDR: api.CIDRSlice{worldSubnet},
},
}}).WithEndpointSelector(api.WildcardEndpointSelector)
mapKeyL3L4Port64To127ProtoTCPWorldSubNetIngress = IngressKey().WithIdentity(worldSubnetIdentity).WithTCPPortPrefix(64, 10)
mapKeyL3L4Port5ProtoTCPWorldSubNetIngress = IngressKey().WithIdentity(worldSubnetIdentity).WithTCPPort(5)
mapKeyL3L4Port6To7ProtoTCPWorldSubNetIngress = IngressKey().WithIdentity(worldSubnetIdentity).WithTCPPortPrefix(6, 15)
mapKeyL3L4Port8To9ProtoTCPWorldSubNetIngress = IngressKey().WithIdentity(worldSubnetIdentity).WithTCPPortPrefix(8, 15)
mapKeyL3L4Port10ProtoTCPWorldSubNetIngress = IngressKey().WithIdentity(worldSubnetIdentity).WithTCPPort(10)
mapKeyL3L4Port64To127ProtoTCPWorldIPIngress = IngressKey().WithIdentity(worldIPIdentity).WithTCPPortPrefix(64, 10)
mapKeyL3L4Port5ProtoTCPWorldIPIngress = IngressKey().WithIdentity(worldIPIdentity).WithTCPPort(5)
mapKeyL3L4Port6To7ProtoTCPWorldIPIngress = IngressKey().WithIdentity(worldIPIdentity).WithTCPPortPrefix(6, 15)
mapKeyL3L4Port8To9ProtoTCPWorldIPIngress = IngressKey().WithIdentity(worldIPIdentity).WithTCPPortPrefix(8, 15)
mapKeyL3L4Port10ProtoTCPWorldIPIngress = IngressKey().WithIdentity(worldIPIdentity).WithTCPPort(10)
)
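// Test_EnsureDeniesPrecedeAllows verifies that deny entries take precedence
// over overlapping allow entries in the resulting map state, including when
// the deny and allow rules target world, subnet and single-IP identities of
// different granularity.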
func Test_EnsureDeniesPrecedeAllows(t *testing.T) {
// Cache the policy enforcement setting so it can be restored after the test,
// avoiding pollution across tests.
oldPolicyEnable := GetPolicyEnabled()
defer SetPolicyEnabled(oldPolicyEnable)
SetPolicyEnabled(option.DefaultEnforcement)
identityCache := identity.IdentityMap{
identity.NumericIdentity(identityFoo): labelsFoo,
identity.ReservedIdentityWorld: labels.LabelWorld.LabelArray(),
identity.ReservedIdentityWorldIPv4: labels.LabelWorldIPv4.LabelArray(),
identity.ReservedIdentityWorldIPv6: labels.LabelWorldIPv6.LabelArray(),
worldIPIdentity: lblWorldIP.LabelArray(), // "192.0.2.3/32"
worldSubnetIdentity: lblWorldSubnet.LabelArray(), // "192.0.2.0/24"
}
selectorCache := testNewSelectorCache(identityCache)
identity := identity.NewIdentityFromLabelArray(identity.NumericIdentity(identityFoo), labelsFoo)
tests := []struct {
test string
rules api.Rules
expected mapState
}{
{"deny_world_no_labels", api.Rules{ruleAllowAllIngress, ruleL3DenyWorld, ruleL3AllowWorldIP}, testMapState(mapStateMap{
mapKeyAnyIngress: mapEntryAllow,
mapKeyL3WorldIngress: mapEntryDeny,
mapKeyL3WorldIngressIPv4: mapEntryDeny,
mapKeyL3WorldIngressIPv6: mapEntryDeny,
mapKeyL3WorldEgress: mapEntryDeny,
mapKeyL3WorldEgressIPv4: mapEntryDeny,
mapKeyL3WorldEgressIPv6: mapEntryDeny,
mapKeyL3SubnetIngress: mapEntryDeny,
mapKeyL3SubnetEgress: mapEntryDeny,
mapKeyL3SmallerSubnetIngress: mapEntryDeny,
mapKeyL3SmallerSubnetEgress: mapEntryDeny,
})}, {"deny_world_with_labels", api.Rules{ruleAllowAllIngress, ruleL3DenyWorldWithLabels, ruleL3AllowWorldIP}, testMapState(mapStateMap{
mapKeyAnyIngress: mapEntryAllow,
mapKeyL3WorldIngress: mapEntryWorldDenyWithLabels,
mapKeyL3WorldIngressIPv4: mapEntryWorldDenyWithLabels,
mapKeyL3WorldIngressIPv6: mapEntryWorldDenyWithLabels,
mapKeyL3WorldEgress: mapEntryWorldDenyWithLabels,
mapKeyL3WorldEgressIPv4: mapEntryWorldDenyWithLabels,
mapKeyL3WorldEgressIPv6: mapEntryWorldDenyWithLabels,
mapKeyL3SubnetIngress: mapEntryWorldDenyWithLabels,
mapKeyL3SubnetEgress: mapEntryWorldDenyWithLabels,
mapKeyL3SmallerSubnetIngress: mapEntryWorldDenyWithLabels,
mapKeyL3SmallerSubnetEgress: mapEntryWorldDenyWithLabels,
})}, {"deny_one_ip_with_a_larger_subnet", api.Rules{ruleAllowAllIngress, ruleL3DenySubnet, ruleL3AllowWorldIP}, testMapState(mapStateMap{
mapKeyAnyIngress: mapEntryAllow,
mapKeyL3SubnetIngress: mapEntryDeny,
mapKeyL3SubnetEgress: mapEntryDeny,
mapKeyL3SmallerSubnetIngress: mapEntryDeny,
mapKeyL3SmallerSubnetEgress: mapEntryDeny,
})}, {"deny_part_of_a_subnet_with_an_ip", api.Rules{ruleAllowAllIngress, ruleL3DenySmallerSubnet, ruleL3AllowLargerSubnet}, testMapState(mapStateMap{
mapKeyAnyIngress: mapEntryAllow,
mapKeyL3SmallerSubnetIngress: mapEntryDeny,
mapKeyL3SmallerSubnetEgress: mapEntryDeny,
mapKeyL3SubnetIngress: mapEntryAllow,
mapKeyL3SubnetEgress: mapEntryAllow,
})}, {"broad_cidr_deny_is_a_portproto_subset_of_a_specific_cidr_allow", api.Rules{ruleAllowAllIngress, ruleL3L4Port8080ProtoAnyDenyWorld, ruleL3AllowWorldIP}, testMapState(mapStateMap{
mapKeyAnyIngress: mapEntryAllow,
mapKeyL3L4Port8080ProtoTCPWorldIngress: mapEntryDeny,
mapKeyL3L4Port8080ProtoTCPWorldEgress: mapEntryDeny,
mapKeyL3L4Port8080ProtoUDPWorldIngress: mapEntryDeny,
mapKeyL3L4Port8080ProtoUDPWorldEgress: mapEntryDeny,
mapKeyL3L4Port8080ProtoSCTPWorldIngress: mapEntryDeny,
mapKeyL3L4Port8080ProtoSCTPWorldEgress: mapEntryDeny,
mapKeyL3L4Port8080ProtoTCPWorldIPv4Ingress: mapEntryDeny,
mapKeyL3L4Port8080ProtoTCPWorldIPv4Egress: mapEntryDeny,
mapKeyL3L4Port8080ProtoUDPWorldIPv4Ingress: mapEntryDeny,
mapKeyL3L4Port8080ProtoUDPWorldIPv4Egress: mapEntryDeny,
mapKeyL3L4Port8080ProtoSCTPWorldIPv4Ingress: mapEntryDeny,
mapKeyL3L4Port8080ProtoSCTPWorldIPv4Egress: mapEntryDeny,
mapKeyL3L4Port8080ProtoTCPWorldIPv6Ingress: mapEntryDeny,
mapKeyL3L4Port8080ProtoTCPWorldIPv6Egress: mapEntryDeny,
mapKeyL3L4Port8080ProtoUDPWorldIPv6Ingress: mapEntryDeny,
mapKeyL3L4Port8080ProtoUDPWorldIPv6Egress: mapEntryDeny,
mapKeyL3L4Port8080ProtoSCTPWorldIPv6Ingress: mapEntryDeny,
mapKeyL3L4Port8080ProtoSCTPWorldIPv6Egress: mapEntryDeny,
mapKeyL3L4Port8080ProtoTCPWorldSNIngress: mapEntryDeny,
mapKeyL3L4Port8080ProtoTCPWorldSNEgress: mapEntryDeny,
mapKeyL3L4Port8080ProtoUDPWorldSNIngress: mapEntryDeny,
mapKeyL3L4Port8080ProtoUDPWorldSNEgress: mapEntryDeny,
mapKeyL3L4Port8080ProtoSCTPWorldSNIngress: mapEntryDeny,
mapKeyL3L4Port8080ProtoSCTPWorldSNEgress: mapEntryDeny,
mapKeyL3L4Port8080ProtoTCPWorldIPIngress: mapEntryDeny,
mapKeyL3L4Port8080ProtoTCPWorldIPEgress: mapEntryDeny,
mapKeyL3L4Port8080ProtoUDPWorldIPIngress: mapEntryDeny,
mapKeyL3L4Port8080ProtoUDPWorldIPEgress: mapEntryDeny,
mapKeyL3L4Port8080ProtoSCTPWorldIPIngress: mapEntryDeny,
mapKeyL3L4Port8080ProtoSCTPWorldIPEgress: mapEntryDeny,
mapKeyL3SmallerSubnetIngress: mapEntryAllow,
mapKeyL3SmallerSubnetEgress: mapEntryAllow,
})}, {"broad_cidr_allow_is_a_portproto_subset_of_a_specific_cidr_deny", api.Rules{ruleAllowAllIngress, ruleL3AllowWorldSubnet, ruleL3DenyWorldIP}, testMapState(mapStateMap{
mapKeyAnyIngress: mapEntryAllow,
mapKeyL3L4Port8080ProtoTCPWorldSNIngress: mapEntryAllow,
mapKeyL3L4Port8080ProtoTCPWorldSNEgress: mapEntryAllow,
mapKeyL3L4Port8080ProtoUDPWorldSNIngress: mapEntryAllow,
mapKeyL3L4Port8080ProtoUDPWorldSNEgress: mapEntryAllow,
mapKeyL3L4Port8080ProtoSCTPWorldSNIngress: mapEntryAllow,
mapKeyL3L4Port8080ProtoSCTPWorldSNEgress: mapEntryAllow,
mapKeyL4AnyPortProtoWorldIPIngress: mapEntryDeny,
mapKeyL4AnyPortProtoWorldIPEgress: mapEntryDeny,
})}, {"named_port_world_subnet", api.Rules{ruleAllowAllIngress, ruleL3AllowWorldSubnetNamedPort}, testMapState(mapStateMap{
mapKeyAnyIngress: mapEntryAllow,
mapKeyL3L4NamedPortHTTPProtoTCPWorldSubNetIngress: mapEntryAllow,
mapKeyL3L4NamedPortHTTPProtoTCPWorldIPIngress: mapEntryAllow,
})}, {"port_range_world_subnet", api.Rules{ruleAllowAllIngress, ruleL3AllowWorldSubnetPortRange}, testMapState(mapStateMap{
mapKeyAnyIngress: mapEntryAllow,
mapKeyL3L4Port64To127ProtoTCPWorldSubNetIngress: mapEntryAllow,
mapKeyL3L4Port5ProtoTCPWorldSubNetIngress: mapEntryAllow,
mapKeyL3L4Port6To7ProtoTCPWorldSubNetIngress: mapEntryAllow,
mapKeyL3L4Port8To9ProtoTCPWorldSubNetIngress: mapEntryAllow,
mapKeyL3L4Port10ProtoTCPWorldSubNetIngress: mapEntryAllow,
mapKeyL3L4Port64To127ProtoTCPWorldIPIngress: mapEntryAllow,
mapKeyL3L4Port5ProtoTCPWorldIPIngress: mapEntryAllow,
mapKeyL3L4Port6To7ProtoTCPWorldIPIngress: mapEntryAllow,
mapKeyL3L4Port8To9ProtoTCPWorldIPIngress: mapEntryAllow,
mapKeyL3L4Port10ProtoTCPWorldIPIngress: mapEntryAllow,
})},
}
// Do not test in dualstack mode
defer func(ipv4, ipv6 bool) {
option.Config.EnableIPv4 = ipv4
option.Config.EnableIPv6 = ipv6
}(option.Config.EnableIPv4, option.Config.EnableIPv6)
option.Config.EnableIPv4 = true
option.Config.EnableIPv6 = false
for _, tt := range tests {
repo := newPolicyDistillery(selectorCache)
for _, rule := range tt.rules {
if rule != nil {
_, _ = repo.MustAddList(api.Rules{rule})
}
}
t.Run(tt.test, func(t *testing.T) {
logBuffer := new(bytes.Buffer)
repo = repo.WithLogBuffer(logBuffer)
mapstate, err := repo.distillPolicy(DummyOwner{}, identity)
if err != nil {
t.Errorf("Policy resolution failure: %s", err)
}
if equal := assert.True(t, mapstate.Equal(&tt.expected), mapstate.diff(&tt.expected)); !equal {
t.Logf("Policy Trace: \n%s\n", logBuffer.String())
t.Errorf("Policy test, %q, obtained didn't match expected for endpoint %s", tt.test, labelsFoo)
}
})
}
}
var (
allIPv4 = api.CIDR("0.0.0.0/0")
lblAllIPv4 = labels.ParseSelectLabelArray(fmt.Sprintf("%s:%s", labels.LabelSourceCIDR, allIPv4))
one3Z8 = api.CIDR("1.0.0.0/8")
one3Z8Identity = localIdentity(16331)
lblOne3Z8 = labels.ParseSelectLabelArray(fmt.Sprintf("%s:%s", labels.LabelSourceCIDR, one3Z8))
one0Z32 = api.CIDR("1.1.1.1/32")
one0Z32Identity = localIdentity(16332)
lblOne0Z32 = labels.ParseSelectLabelArray(fmt.Sprintf("%s:%s", labels.LabelSourceCIDR, one0Z32))
ruleAllowEgressDenyCIDRSet = api.NewRule().WithEgressRules([]api.EgressRule{{
EgressCommonRule: api.EgressCommonRule{
ToCIDR: api.CIDRSlice{allIPv4},
},
}}).WithEgressDenyRules([]api.EgressDenyRule{{
EgressCommonRule: api.EgressCommonRule{
ToCIDRSet: api.CIDRRuleSlice{
api.CIDRRule{
Cidr: one3Z8,
ExceptCIDRs: []api.CIDR{one0Z32},
},
},
},
}}).WithEndpointSelector(api.WildcardEndpointSelector)
)
// Allow-ception tests that an allow within a deny within an allow
// is properly calculated.
func Test_Allowception(t *testing.T) {
// Cache the policy enforcement value from when the test was run to avoid pollution
// across tests.
oldPolicyEnable := GetPolicyEnabled()
defer SetPolicyEnabled(oldPolicyEnable)
SetPolicyEnabled(option.DefaultEnforcement)
identityCache := identity.IdentityMap{
identity.NumericIdentity(identityFoo): labelsFoo,
identity.ReservedIdentityWorld: append(labels.LabelWorld.LabelArray(), lblAllIPv4...),
one3Z8Identity: lblOne3Z8, // 16331 (0x3fcb): ["1.0.0.0/8"]
one0Z32Identity: lblOne0Z32, // 16332 (0x3fcc): ["1.1.1.1/32"]
}
selectorCache := testNewSelectorCache(identityCache)
computedMapStateForAllowCeption := emptyMapState().withState(mapStateMap{
ingressKey(0, 0, 0, 0): mapEntryL7None_(lblsAllowAllIngress),
egressKey(identity.ReservedIdentityWorld, 0, 0, 0): mapEntryAllow,
egressKey(one3Z8Identity, 0, 0, 0): mapEntryDeny,
egressKey(one0Z32Identity, 0, 0, 0): mapEntryAllow,
})
identity := identity.NewIdentityFromLabelArray(identity.NumericIdentity(identityFoo), labelsFoo)
// Do not test in dualstack mode
defer func(ipv4, ipv6 bool) {
option.Config.EnableIPv4 = ipv4
option.Config.EnableIPv6 = ipv6
}(option.Config.EnableIPv4, option.Config.EnableIPv6)
option.Config.EnableIPv4 = true
option.Config.EnableIPv6 = false
repo := newPolicyDistillery(selectorCache)
rules := api.Rules{ruleAllowEgressDenyCIDRSet}
for _, rule := range rules {
if rule != nil {
_, _ = repo.MustAddList(api.Rules{rule})
}
}
logBuffer := new(bytes.Buffer)
repo = repo.WithLogBuffer(logBuffer)
mapstate, err := repo.distillPolicy(DummyOwner{}, identity)
if err != nil {
t.Errorf("Policy resolution failure: %s", err)
}
if equal := assert.True(t, mapstate.Equal(&computedMapStateForAllowCeption), mapstate.diff(&computedMapStateForAllowCeption)); !equal {
t.Logf("Policy Trace: \n%s\n", logBuffer.String())
t.Errorf("Policy obtained didn't match expected for endpoint %s", labelsFoo)
}
}
func Test_EnsureEntitiesSelectableByCIDR(t *testing.T) {
// Cache the policy enforcement value from when the test was run to avoid pollution
// across tests.
oldPolicyEnable := GetPolicyEnabled()
defer SetPolicyEnabled(oldPolicyEnable)
SetPolicyEnabled(option.DefaultEnforcement)
hostLabel := labels.NewFrom(labels.LabelHost)
hostLabel.MergeLabels(lblHostIPv4CIDR)
hostLabel.MergeLabels(lblHostIPv6CIDR)
identityCache := identity.IdentityMap{
identity.NumericIdentity(identityFoo): labelsFoo,
identity.ReservedIdentityHost: hostLabel.LabelArray(),
}
selectorCache := testNewSelectorCache(identityCache)
identity := identity.NewIdentityFromLabelArray(identity.NumericIdentity(identityFoo), labelsFoo)
tests := []struct {
test string
rules api.Rules
expected mapState
}{
{"host_cidr_select", api.Rules{ruleL3AllowHostEgress}, emptyMapState().withState(mapStateMap{
mapKeyL3UnknownIngress: mapEntryL3UnknownIngress,
mapKeyL3HostEgress: mapEntryAllow,
})},
}
for _, tt := range tests {
repo := newPolicyDistillery(selectorCache)
for _, rule := range tt.rules {
if rule != nil {
_, _ = repo.MustAddList(api.Rules{rule})
}
}
t.Run(tt.test, func(t *testing.T) {
logBuffer := new(bytes.Buffer)
repo = repo.WithLogBuffer(logBuffer)
mapstate, err := repo.distillPolicy(DummyOwner{}, identity)
if err != nil {
t.Errorf("Policy resolution failure: %s", err)
}
if equal := assert.True(t, mapstate.Equal(&tt.expected), mapstate.diff(&tt.expected)); !equal {
t.Logf("Policy Trace: \n%s\n", logBuffer.String())
t.Errorf("Policy test, %q, obtained didn't match expected for endpoint %s", tt.test, labelsFoo)
}
})
}
}
func addCIDRIdentity(prefix string, c identity.IdentityMap) identity.NumericIdentity {
lbls := labels.GetCIDRLabels(netip.MustParsePrefix(prefix)).LabelArray()
// return an existing id?
for id, ls := range c {
if ls.Equals(lbls) {
return id
}
}
// Find next free id
id := identity.IdentityScopeLocal
for {
if _, exists := c[id]; !exists {
c[id] = lbls
return id
}
id++
}
}
func addFQDNIdentity(fqdnSel api.FQDNSelector, c identity.IdentityMap) (id identity.NumericIdentity, adds identity.IdentityMap) {
lbls := labels.Labels{}
l := fqdnSel.IdentityLabel()
lbls[l.Key] = l
lblA := lbls.LabelArray()
// return an existing id?
for id, ls := range c {
if ls.Equals(lblA) {
return id, nil
}
}
// Find next free id
id = identity.IdentityScopeLocal
for {
if _, exists := c[id]; !exists {
return id, identity.IdentityMap{id: lblA}
}
id++
}
}
// Validate that incrementally deleted identities are handled properly when present in both CIDR and FQDN rules.
func Test_IncrementalFQDNDeletion(t *testing.T) {
// Cache the policy enforcement value from when the test was run to avoid pollution
// across tests.
oldPolicyEnable := GetPolicyEnabled()
defer SetPolicyEnabled(oldPolicyEnable)
SetPolicyEnabled(option.DefaultEnforcement)
// load in standard reserved identities
identityCache := identity.IdentityMap{
fooIdentity.ID: fooIdentity.LabelArray,
}
identity.IterateReservedIdentities(func(ni identity.NumericIdentity, id *identity.Identity) {
identityCache[ni] = id.Labels.LabelArray()
})
id2 := addCIDRIdentity("192.0.2.0/24", identityCache)
id3 := addCIDRIdentity("192.0.3.0/24", identityCache)
selectorCache := testNewSelectorCache(identityCache)
fqdnSel := api.FQDNSelector{MatchName: "www.example.com"}
idExample, fqdnIdentities := addFQDNIdentity(fqdnSel, identityCache)
tests := []struct {
test string
rules api.Rules
expected MapStateMap
fqdnIds identity.IdentityMap
adds MapStateMap
}{{
test: "incremental_fqdn_deletion",
rules: api.Rules{
&api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("foo")),
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToCIDR: []api.CIDR{
"192.0.2.0/24",
"192.0.3.0/24",
},
},
},
{
ToFQDNs: api.FQDNSelectorSlice{fqdnSel},
},
},
},
},
expected: MapStateMap{
mapKeyAllowAll__: AllowEntry,
egressL3OnlyKey(id2): AllowEntry,
egressL3OnlyKey(id3): AllowEntry,
},
fqdnIds: maps.Clone(fqdnIdentities),
adds: MapStateMap{
egressL3OnlyKey(idExample): AllowEntry,
},
}}
for _, tt := range tests {
repo := newPolicyDistillery(selectorCache)
repo.MustAddList(tt.rules)
t.Run(tt.test, func(t *testing.T) {
logBuffer := new(bytes.Buffer)
repo = repo.WithLogBuffer(logBuffer)
epp, err := repo.distillEndpointPolicy(DummyOwner{}, fooIdentity)
if err != nil {
t.Fatal(err)
}
mapstate := epp.policyMapState
if equal := assert.True(t, mapstate.Equals(tt.expected), mapstate.Diff(tt.expected)); !equal {
t.Logf("Policy Trace: \n%s\n", logBuffer.String())
t.Errorf("Policy test, %q, obtained didn't match expected for endpoint %s", tt.test, labelsFoo)
}
wg := &sync.WaitGroup{}
selectorCache.UpdateIdentities(tt.fqdnIds, nil, wg)
wg.Wait()
closer, changes := epp.ConsumeMapChanges()
adds := MapStateMap{}
for k := range changes.Adds {
adds[k] = epp.policyMapState.entries[k].MapStateEntry
}
closer()
if equal := assert.True(t, maps.Equal(adds, tt.adds), adds.Diff(tt.adds)); !equal {
t.Errorf("Policy test, %q, obtained didn't match expected for endpoint %s", tt.test, labelsFoo)
}
// let fqdn ID expire
wg = &sync.WaitGroup{}
selectorCache.UpdateIdentities(nil, tt.fqdnIds, wg)
wg.Wait()
closer, changes = epp.ConsumeMapChanges()
closer()
if equal := assert.True(t, epp.policyMapState.Equals(tt.expected), mapstate.Diff(tt.expected)); !equal {
t.Errorf("Policy test, %q, obtained didn't match expected for endpoint %s", tt.test, labelsFoo)
}
epp.Ready()
epp.Detach()
})
}
}
// allowsKey returns true if 'ms' allows traffic with 'key'
func (ms *mapState) allowsKey(key Key) bool {
entry, _ := ms.lookup(key)
return !entry.IsDeny()
}
func TestEgressPortRangePrecedence(t *testing.T) {
td := newTestData()
identityCache := identity.IdentityMap{
identity.NumericIdentity(100): labelsA,
}
td.sc.UpdateIdentities(identityCache, nil, &sync.WaitGroup{})
identity := identity.NewIdentityFromLabelArray(identity.NumericIdentity(100), labelsA)
type portRange struct {
startPort, endPort uint16
isAllow bool
}
tests := []struct {
name string
rules []portRange
rangeTests []portRange
}{
{
name: "deny range (1-1024) covers port allow (80)",
rules: []portRange{
{80, 0, true},
{1, 1024, false},
},
rangeTests: []portRange{
{79, 81, false},
{1023, 1025, false},
},
},
{
name: "deny port (80) in broader allow range (1-1024)",
rules: []portRange{
{80, 0, false},
{1, 1024, true},
},
rangeTests: []portRange{
{1, 2, true},
{79, 0, true},
{80, 0, false},
{81, 0, true},
{1023, 1024, true},
{1025, 1026, false},
},
},
{
name: "wildcard deny (*) covers broad allow range (1-1024)",
rules: []portRange{
{0, 0, false},
{1, 1024, true},
},
rangeTests: []portRange{
{1, 2, false},
{1023, 1025, false},
},
},
{
name: "wildcard allow (*) has an deny range hole (1-1024)",
rules: []portRange{
{0, 0, true},
{1, 1024, false},
},
rangeTests: []portRange{
{1, 2, false},
{1023, 1024, false},
{1025, 1026, true},
{65534, 0, true},
},
},
{
name: "two allow ranges (80-90, 90-100) with overlapping deny (85-95)",
rules: []portRange{
{80, 90, true},
{85, 95, false},
{90, 100, true},
},
rangeTests: []portRange{
{79, 0, false},
{80, 84, true},
{85, 95, false},
{96, 100, true},
{101, 0, true},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tr := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
},
}
for _, rul := range tt.rules {
pp := api.PortProtocol{
Port: fmt.Sprintf("%d", rul.startPort),
EndPort: int32(rul.endPort),
Protocol: api.ProtoTCP,
}
if rul.isAllow {
tr.Rule.Egress = append(tr.Rule.Egress, api.EgressRule{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{pp},
}},
})
} else {
tr.Rule.EgressDeny = append(tr.Rule.EgressDeny, api.EgressDenyRule{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortDenyRule{{
Ports: []api.PortProtocol{pp},
}},
})
}
}
buffer := new(bytes.Buffer)
ctxFromA := SearchContext{From: labelsA, Trace: TRACE_VERBOSE}
ctxFromA.Logging = stdlog.New(buffer, "", 0)
defer t.Log(buffer)
require.NoError(t, tr.Sanitize())
state := traceState{}
res, err := tr.resolveEgressPolicy(td.testPolicyContext, &ctxFromA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
repo := newPolicyDistillery(td.sc)
repo.MustAddList(api.Rules{&tr.Rule})
repo = repo.WithLogBuffer(buffer)
mapstate, err := repo.distillPolicy(DummyOwner{}, identity)
require.NoError(t, err)
require.NotNil(t, mapstate)
for _, rt := range tt.rangeTests {
for i := rt.startPort; i <= rt.endPort; i++ {
ctxFromA.DPorts = []*models.Port{{Port: i, Protocol: models.PortProtocolTCP}}
key := EgressKey().WithIdentity(identity.ID).WithTCPPort(i)
if rt.isAllow {
// IngressCoversContext just checks the "From" labels of the search context.
require.Equalf(t, api.Allowed.String(), res.IngressCoversContext(&ctxFromA).String(), "Requesting port %d", i)
require.Truef(t, mapstate.allowsKey(key), "key (%v) not allowed", key)
} else {
// IngressCoversContext just checks the "From" labels of the search context.
require.Equalf(t, api.Denied.String(), res.IngressCoversContext(&ctxFromA).String(), "Requesting port %d", i)
require.Falsef(t, mapstate.allowsKey(key), "key (%v) allowed", key)
}
}
}
})
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"sync"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/lock"
)
// Endpoint refers to any structure which has the following properties:
// * a node-local ID stored as a uint16
// * a security identity
// * a means of incrementing its policy revision
// * a means of checking if it represents a node or a pod.
// * a set of labels
// * a kubernetes namespace
type Endpoint interface {
GetID16() uint16
GetSecurityIdentity() (*identity.Identity, error)
PolicyRevisionBumpEvent(rev uint64)
IsHost() bool
GetOpLabels() []string
GetK8sNamespace() string
}
// EndpointSet is used to be able to group together a given set of Endpoints
// that need to have a specific operation performed upon them (e.g., policy
// revision updates).
type EndpointSet struct {
mutex lock.RWMutex
endpoints map[Endpoint]struct{}
}
// NewEndpointSet returns an EndpointSet with the given Endpoints map
func NewEndpointSet(m map[Endpoint]struct{}) *EndpointSet {
if m != nil {
return &EndpointSet{
endpoints: m,
}
}
return &EndpointSet{
endpoints: map[Endpoint]struct{}{},
}
}
// ForEachGo runs epFunc asynchronously inside a goroutine for each endpoint in
// the EndpointSet. It signals to the provided WaitGroup when epFunc has been
// executed for each endpoint.
func (e *EndpointSet) ForEachGo(wg *sync.WaitGroup, epFunc func(epp Endpoint)) {
e.mutex.RLock()
defer e.mutex.RUnlock()
wg.Add(len(e.endpoints))
for ep := range e.endpoints {
go func(eppp Endpoint) {
epFunc(eppp)
wg.Done()
}(ep)
}
}
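// Illustrative usage sketch (not part of the original source; the callback body
// is hypothetical): ForEachGo fans epFunc out to one goroutine per endpoint and
// signals the provided WaitGroup when each call has finished, so the caller can
// simply wait on it.
//
//	var wg sync.WaitGroup
//	eps := NewEndpointSet(nil)
//	eps.ForEachGo(&wg, func(ep Endpoint) {
//		ep.PolicyRevisionBumpEvent(42) // e.g. bump the policy revision
//	})
//	wg.Wait()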
// Delete removes ep from the EndpointSet.
func (e *EndpointSet) Delete(ep Endpoint) {
e.mutex.Lock()
delete(e.endpoints, ep)
e.mutex.Unlock()
}
// Insert adds ep to the EndpointSet.
func (e *EndpointSet) Insert(ep Endpoint) {
e.mutex.Lock()
e.endpoints[ep] = struct{}{}
e.mutex.Unlock()
}
// Len returns the number of elements in the EndpointSet.
func (e *EndpointSet) Len() (nElem int) {
e.mutex.RLock()
nElem = len(e.endpoints)
e.mutex.RUnlock()
return
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"bytes"
"encoding/json"
"fmt"
"math/bits"
"sort"
"strconv"
"strings"
"sync/atomic"
"unique"
cilium "github.com/cilium/proxy/go/cilium/api"
"github.com/sirupsen/logrus"
k8sTypes "k8s.io/apimachinery/pkg/types"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/container/bitlpm"
"github.com/cilium/cilium/pkg/container/versioned"
"github.com/cilium/cilium/pkg/iana"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/policy/trafficdirection"
"github.com/cilium/cilium/pkg/policy/types"
"github.com/cilium/cilium/pkg/u8proto"
)
type AuthType = types.AuthType
type AuthTypes = types.AuthTypes
type AuthRequirement = types.AuthRequirement
// authmap maps remote selectors to their needed AuthTypes, if any
type authMap map[CachedSelector]types.AuthTypes
// covers returns true if 'l4rule' has the effect needed for the 'l3l4rule', when 'l4rule' is added
// to the datapath, due to the l4-only rule matching if l3l4-rule is not present. This determination
// can be done here only when both rules have the same port number (or both have a wildcarded port).
func (l4rule *PerSelectorPolicy) covers(l3l4rule *PerSelectorPolicy) bool {
// Deny takes highest precedence so it is dealt with first
if l4rule != nil && l4rule.IsDeny {
// l4-only deny takes precedence
return true
} else if l3l4rule != nil && l3l4rule.IsDeny {
// Must not skip if l3l4 rule is deny while l4-only rule is not
return false
}
// Can not skip if rules have different auth types. In all other cases the auth type from
// the wildcardRule can be used also for the current rule.
// Note that the caller must deal with inheriting redirect from wildcardRule to currentRule,
// if any.
if l3l4rule.getAuthRequirement() != l4rule.getAuthRequirement() {
return false
}
l3l4IsRedirect := l3l4rule.IsRedirect()
l4OnlyIsRedirect := l4rule.IsRedirect()
if l3l4IsRedirect && !l4OnlyIsRedirect {
// Can not skip if l3l4-rule is redirect while l4-only is not
return false
} else if l3l4IsRedirect && l4OnlyIsRedirect &&
(l3l4rule.Listener != l4rule.Listener || l3l4rule.Priority != l4rule.Priority) {
// L3l4 rule has a different listener or priority, it can not be skipped
return false
}
// else can skip
return true
}
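// A minimal sketch of the precedence implemented by covers() above (values are
// hypothetical, not from the original source): an L4-only deny always covers,
// an L3/L4 deny is never covered, and differing auth requirements, listeners,
// or priorities prevent covering.
//
//	l4OnlyDeny := &PerSelectorPolicy{IsDeny: true}
//	l3l4Allow := &PerSelectorPolicy{}
//	_ = l4OnlyDeny.covers(l3l4Allow) // true: the L4-only deny takes precedence
//	_ = l3l4Allow.covers(l4OnlyDeny) // false: the L3/L4 deny must be kept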
// TLS context holds the secret values resolved from an 'api.TLSContext'
type TLSContext struct {
TrustedCA string `json:"trustedCA,omitempty"`
CertificateChain string `json:"certificateChain,omitempty"`
PrivateKey string `json:"privateKey,omitempty"`
// Secret holds the name of the Secret that was referenced in the Policy
Secret k8sTypes.NamespacedName
// FromFile is true if the values in the keys above were read from the filesystem
// and not a Kubernetes Secret
FromFile bool
}
// Equal returns true if 'a' and 'b' have the same contents.
func (a *TLSContext) Equal(b *TLSContext) bool {
return a == nil && b == nil || a != nil && b != nil && *a == *b
}
// MarshalJSON marshals a redacted version of the TLSContext. We want
// to see which fields are present, but not reveal their values in any
// logs, etc.
func (t *TLSContext) MarshalJSON() ([]byte, error) {
type tlsContext TLSContext
var redacted tlsContext
if t.TrustedCA != "" {
redacted.TrustedCA = "[redacted]"
}
if t.CertificateChain != "" {
redacted.CertificateChain = "[redacted]"
}
if t.PrivateKey != "" {
redacted.PrivateKey = "[redacted]"
}
return json.Marshal(&redacted)
}
type StringSet map[string]struct{}
func (a StringSet) Equal(b StringSet) bool {
if len(a) != len(b) {
return false
}
for k := range a {
if _, exists := b[k]; !exists {
return false
}
}
return true
}
// NewStringSet returns a StringSet initialized from slice of strings.
// Returns nil for an empty slice
func NewStringSet(from []string) StringSet {
if len(from) == 0 {
return nil
}
set := make(StringSet, len(from))
for _, s := range from {
set[s] = struct{}{}
}
return set
}
// Merge returns StringSet with strings from both a and b.
// Returns a or b, possibly with modifications.
func (a StringSet) Merge(b StringSet) StringSet {
if len(a) == 0 {
return b
}
for s := range b {
a[s] = struct{}{}
}
return a
}
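// Usage sketch (hypothetical values): NewStringSet returns nil for an empty
// slice, and Merge may return either the receiver or the argument, so callers
// should always use the returned set.
//
//	snis := NewStringSet([]string{"example.com"})
//	empty := NewStringSet(nil) // nil
//	snis = snis.Merge(NewStringSet([]string{"cilium.io"}))
//	merged := empty.Merge(snis) // returns snis, since empty has no entries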
// PerSelectorPolicy contains policy rules for a CachedSelector, i.e. for a
// selection of numerical identities.
type PerSelectorPolicy struct {
// TerminatingTLS is the TLS context for the connection terminated by
// the L7 proxy. For egress policy this specifies the server-side TLS
// parameters to be applied on the connections originated from the local
// POD and terminated by the L7 proxy. For ingress policy this specifies
// the server-side TLS parameters to be applied on the connections
// originated from a remote source and terminated by the L7 proxy.
TerminatingTLS *TLSContext `json:"terminatingTLS,omitempty"`
// OriginatingTLS is the TLS context for the connections originated by
// the L7 proxy. For egress policy this specifies the client-side TLS
// parameters for the upstream connection originating from the L7 proxy
// to the remote destination. For ingress policy this specifies the
// client-side TLS parameters for the connection from the L7 proxy to
// the local POD.
OriginatingTLS *TLSContext `json:"originatingTLS,omitempty"`
// ServerNames is a list of allowed TLS SNI values. If not empty, then
// TLS must be present and one of the provided SNIs must be indicated in the
// TLS handshake.
ServerNames StringSet `json:"serverNames,omitempty"`
// isRedirect is 'true' when traffic must be redirected
isRedirect bool `json:"-"`
// Listener is an optional fully qualified name of an Envoy Listener defined in a CiliumEnvoyConfig CRD that should be
// used for this traffic instead of the default listener
Listener string `json:"listener,omitempty"`
// Priority of the listener used when multiple listeners would apply to the same
// MapStateEntry.
// Lower numbers indicate higher priority, except for the default 0, which indicates the
// lowest priority. If a higher priority is desired, a low unique number such as 1, 2, or 3
// should be specified explicitly.
Priority uint8 `json:"priority,omitempty"`
// Pre-computed HTTP rules, computed after rule merging is complete
EnvoyHTTPRules *cilium.HttpNetworkPolicyRules `json:"-"`
// CanShortCircuit is true if all 'EnvoyHTTPRules' may be
// short-circuited by other matches.
CanShortCircuit bool `json:"-"`
api.L7Rules
// Authentication is the kind of cryptographic authentication required for the traffic to be allowed
// at L3, if any.
Authentication *api.Authentication `json:"auth,omitempty"`
// IsDeny is set if traffic matching this L4Filter should be denied
IsDeny bool `json:",omitempty"`
}
// Equal returns true if 'a' and 'b' represent the same L7 Rules
func (a *PerSelectorPolicy) Equal(b *PerSelectorPolicy) bool {
return a == nil && b == nil || a != nil && b != nil &&
a.TerminatingTLS.Equal(b.TerminatingTLS) &&
a.OriginatingTLS.Equal(b.OriginatingTLS) &&
a.ServerNames.Equal(b.ServerNames) &&
a.isRedirect == b.isRedirect &&
a.Listener == b.Listener &&
a.Priority == b.Priority &&
(a.Authentication == nil && b.Authentication == nil || a.Authentication != nil && a.Authentication.DeepEqual(b.Authentication)) &&
a.IsDeny == b.IsDeny &&
a.L7Rules.DeepEqual(&b.L7Rules)
}
// GetListener returns the listener of the PerSelectorPolicy.
func (a *PerSelectorPolicy) GetListener() string {
if a == nil {
return ""
}
return a.Listener
}
// GetPriority returns the priority of the listener of the PerSelectorPolicy.
func (a *PerSelectorPolicy) GetPriority() uint8 {
if a == nil {
return 0
}
return a.Priority
}
// getAuthType returns AuthType for the api.Authentication
func getAuthType(auth *api.Authentication) (bool, AuthType) {
if auth == nil {
return false, types.AuthTypeDisabled
}
switch auth.Mode {
case api.AuthenticationModeDisabled:
return true, types.AuthTypeDisabled
case api.AuthenticationModeRequired:
return true, types.AuthTypeSpire
case api.AuthenticationModeAlwaysFail:
return true, types.AuthTypeAlwaysFail
default:
return false, types.AuthTypeDisabled
}
}
// getAuthType returns the AuthType of the PerSelectorPolicy.
func (a *PerSelectorPolicy) getAuthType() (bool, AuthType) {
if a == nil {
return false, types.AuthTypeDisabled
}
return getAuthType(a.Authentication)
}
// getAuthRequirement returns the AuthRequirement of the PerSelectorPolicy.
func (a *PerSelectorPolicy) getAuthRequirement() AuthRequirement {
if a == nil {
return AuthRequirement(types.AuthTypeDisabled)
}
explicit, authType := getAuthType(a.Authentication)
req := AuthRequirement(authType)
if explicit {
req |= types.AuthTypeIsExplicit
}
return req
}
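// Hedged sketch of how the explicit-auth flag is combined with the auth type
// (illustrative only; the concrete bit layout lives in the types package):
//
//	p := &PerSelectorPolicy{Authentication: &api.Authentication{Mode: api.AuthenticationModeRequired}}
//	req := p.getAuthRequirement() // AuthRequirement(types.AuthTypeSpire) | types.AuthTypeIsExplicit
//	var nilPolicy *PerSelectorPolicy
//	_ = nilPolicy.getAuthRequirement() // AuthRequirement(types.AuthTypeDisabled), no explicit flag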
// IsRedirect returns true if the L7Rules are a redirect.
func (a *PerSelectorPolicy) IsRedirect() bool {
return a != nil && a.isRedirect
}
// HasL7Rules returns whether the `L7Rules` contains any L7 rules.
func (a *PerSelectorPolicy) HasL7Rules() bool {
return !a.L7Rules.IsEmpty()
}
// L7DataMap contains a map of L7 rules per endpoint where key is a CachedSelector
type L7DataMap map[CachedSelector]*PerSelectorPolicy
func (l7 L7DataMap) MarshalJSON() ([]byte, error) {
if len(l7) == 0 {
return []byte("[]"), nil
}
/* First, create a sorted slice of the selectors so we can get
* consistent JSON output */
selectors := make(types.CachedSelectorSlice, 0, len(l7))
for cs := range l7 {
selectors = append(selectors, cs)
}
sort.Sort(selectors)
/* Now we can iterate the slice and generate JSON entries. */
var err error
buffer := bytes.NewBufferString("[")
for _, cs := range selectors {
buffer.WriteString("{\"")
buffer.WriteString(cs.String())
buffer.WriteString("\":")
b, err := json.Marshal(l7[cs])
if err == nil {
buffer.Write(b)
} else {
buffer.WriteString("\"L7DataMap error: ")
buffer.WriteString(err.Error())
buffer.WriteString("\"")
}
buffer.WriteString("},")
}
buffer.Truncate(buffer.Len() - 1) // Drop the final ","
buffer.WriteString("]")
return buffer.Bytes(), err
}
// L7ParserType is the type used to indicate what L7 parser to use.
// Consts are defined for all well known L7 parsers.
// Unknown string values are created for key-value pair policies, which
// are then transparently used in redirect configuration.
type L7ParserType string
func (l7 L7ParserType) String() string {
return (string)(l7)
}
const (
// ParserTypeNone represents the case where no parser type is provided.
ParserTypeNone L7ParserType = ""
// ParserTypeTLS is used for TLS origination, termination, or SNI filtering without any L7
// parsing. If TLS policies are used with HTTP rules, ParserTypeHTTP is used instead.
ParserTypeTLS L7ParserType = "tls"
// ParserTypeCRD is used with a custom CiliumEnvoyConfig redirection. Incompatible with any
// parser type with L7 enforcement (HTTP, Kafka, proxylib), as the custom Listener generally
// does not support them.
ParserTypeCRD L7ParserType = "crd"
// ParserTypeHTTP specifies a HTTP parser type
ParserTypeHTTP L7ParserType = "http"
// ParserTypeKafka specifies a Kafka parser type
ParserTypeKafka L7ParserType = "kafka"
// ParserTypeDNS specifies a DNS parser type
ParserTypeDNS L7ParserType = "dns"
)
// redirectTypes is a bitmask of redirection types of multiple filters
type redirectTypes uint16
const (
// redirectTypeDNS bit is set when policy contains a redirection to DNS proxy
redirectTypeDNS redirectTypes = 1 << iota
// redirectTypeEnvoy bit is set when policy contains a redirection to Envoy
redirectTypeEnvoy
// redirectTypeProxylib bits are set when policy contains a redirection to Proxylib (via
// Envoy)
redirectTypeProxylib redirectTypes = 1<<iota | redirectTypeEnvoy
// redirectTypeNone represents the case where there is no proxy redirect
redirectTypeNone redirectTypes = redirectTypes(0)
)
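// Bitmask sketch (illustrative): redirectTypeProxylib includes the Envoy bit,
// so a policy whose only redirects are proxylib ones still reports that an
// Envoy proxy is required.
//
//	rt := redirectTypeDNS | redirectTypeProxylib
//	_ = rt&redirectTypeEnvoy != 0 // true: proxylib redirection implies Envoy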
func (from L7ParserType) canPromoteTo(to L7ParserType) bool {
switch from {
case ParserTypeNone:
// ParserTypeNone can be promoted to any other type
return true
case ParserTypeTLS:
// ParserTypeTLS can be promoted to any other type, except for DNS or CRD,
// but ParserTypeTLS can not be demoted to ParserTypeNone
if to != ParserTypeNone && to != ParserTypeDNS && to != ParserTypeCRD {
return true
}
}
return false
}
// Merge ParserTypes 'a' to 'b' if possible
func (a L7ParserType) Merge(b L7ParserType) (L7ParserType, error) {
if a == b {
return a, nil
}
if a.canPromoteTo(b) {
return b, nil
}
if b.canPromoteTo(a) {
return a, nil
}
return ParserTypeNone, fmt.Errorf("cannot merge conflicting L7 parsers (%s/%s)", a, b)
}
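// Promotion sketch (illustrative values): ParserTypeNone can be promoted to any
// other parser, ParserTypeTLS can be promoted to L7 parsers other than DNS and
// CRD, and genuinely conflicting parsers produce an error.
//
//	p, _ := ParserTypeNone.Merge(ParserTypeHTTP)  // "http"
//	p, _ = ParserTypeTLS.Merge(ParserTypeHTTP)    // "http"
//	_, err := ParserTypeDNS.Merge(ParserTypeHTTP) // error: cannot merge conflicting L7 parsers
//	_, _ = p, err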
// ruleOrigin is an interned labels.LabelArrayList.String(), a list of rule labels tracking which
// policy rules are the origin for this policy. This information is used when distilling a policy to
// an EndpointPolicy, to track which policy rules were involved for a specific verdict.
type ruleOrigin unique.Handle[string]
func (ro ruleOrigin) Value() string {
return unique.Handle[string](ro).Value()
}
func makeRuleOrigin(lbls labels.LabelArrayList) ruleOrigin {
return ruleOrigin(unique.Make(lbls.String()))
}
func (ro *ruleOrigin) Merge(other ruleOrigin) bool {
if ro.Value() == "" {
*ro = other
return true
}
if other.Value() != "" {
*ro = ruleOrigin(unique.Make(labels.MergeSortedLabelArrayListStrings(ro.Value(), other.Value())))
return true
}
return false
}
func singleRuleOrigin(ruleLabels stringLabels) ruleOrigin {
return ruleOrigin(ruleLabels)
}
var NilRuleOrigin = singleRuleOrigin(EmptyStringLabels)
type testOrigin map[CachedSelector]labels.LabelArrayList
func OriginForTest(m testOrigin) map[CachedSelector]ruleOrigin {
res := make(map[CachedSelector]ruleOrigin, len(m))
for cs, lbls := range m {
res[cs] = makeRuleOrigin(lbls)
}
return res
}
func (o ruleOrigin) GetLabelArrayList() labels.LabelArrayList {
return labels.LabelArrayListFromString(o.Value())
}
// stringLabels is an interned labels.LabelArray.String()
type stringLabels unique.Handle[string]
var EmptyStringLabels = makeStringLabels(nil)
func (sl stringLabels) Value() string {
return unique.Handle[string](sl).Value()
}
func makeStringLabels(lbls labels.LabelArray) stringLabels {
return stringLabels(unique.Make(lbls.Sort().String()))
}
// L4Filter represents the policy (allowed remote sources / destinations of
// traffic) that applies at a specific L4 port/protocol combination (including
// all ports and protocols), at either ingress or egress. The policy here is
// specified in terms of selectors that are mapped to security identities via
// the selector cache.
type L4Filter struct {
// Port is the destination port to allow. Port 0 indicates that all traffic
// is allowed at L4.
Port uint16 `json:"port"`
// EndPort is zero for a singular port
EndPort uint16 `json:"endPort,omitempty"`
PortName string `json:"port-name,omitempty"`
// Protocol is the L4 protocol to allow or NONE
Protocol api.L4Proto `json:"protocol"`
// U8Proto is the Protocol in numeric format, or 0 for NONE
U8Proto u8proto.U8proto `json:"-"`
// wildcard is the cached selector representing a wildcard in this filter, if any.
// This is nil if the wildcard selector is not in 'PerSelectorPolicies'.
// When the wildcard selector is in 'PerSelectorPolicies' this is set to that
// same selector, which can then be used as a map key to find the corresponding
// L4-only L7 policy (which can be nil).
wildcard CachedSelector
// PerSelectorPolicies is a map of policies for selectors, including any L7 rules passed to
// the L7 proxy. nil values represent cached selectors that have no selector-specific policy
// restriction (such as no L7 rules). Holds references to the cached selectors, which must
// be released!
PerSelectorPolicies L7DataMap `json:"l7-rules,omitempty"`
// L7Parser specifies the L7 protocol parser (optional). If specified as
// an empty string, it means that no L7 proxy redirect is performed.
L7Parser L7ParserType `json:"-"`
// Ingress is true if filter applies at ingress; false if it applies at egress.
Ingress bool `json:"-"`
// RuleOrigin is a set of rule labels tracking which policy rules are the origin for this
// L3/L4 filter.
RuleOrigin map[CachedSelector]ruleOrigin `json:"-"`
// This reference is circular, but it is cleaned up at Detach()
policy atomic.Pointer[L4Policy]
}
// SelectsAllEndpoints returns whether the L4Filter selects all
// endpoints, which is true if the wildcard endpoint selector is present in the
// map.
func (l4 *L4Filter) SelectsAllEndpoints() bool {
for cs := range l4.PerSelectorPolicies {
if cs.IsWildcard() {
return true
}
}
return false
}
// GetPerSelectorPolicies returns the PerSelectorPolicies of the
// L4Filter.
func (l4 *L4Filter) GetPerSelectorPolicies() L7DataMap {
return l4.PerSelectorPolicies
}
// GetL7Parser returns the L7ParserType of the L4Filter.
func (l4 *L4Filter) GetL7Parser() L7ParserType {
return l4.L7Parser
}
// GetIngress returns whether the L4Filter applies at ingress or egress.
func (l4 *L4Filter) GetIngress() bool {
return l4.Ingress
}
// GetPort returns the port at which the L4Filter applies as a uint16.
func (l4 *L4Filter) GetPort() uint16 {
return l4.Port
}
// Equals returns true if two L4Filters are equal
func (l4 *L4Filter) Equals(bL4 *L4Filter) bool {
if l4.Port == bL4.Port &&
l4.EndPort == bL4.EndPort &&
l4.PortName == bL4.PortName &&
l4.Protocol == bL4.Protocol &&
l4.Ingress == bL4.Ingress &&
l4.L7Parser == bL4.L7Parser &&
l4.wildcard == bL4.wildcard {
if len(l4.PerSelectorPolicies) != len(bL4.PerSelectorPolicies) {
return false
}
for k, v := range l4.PerSelectorPolicies {
bV, ok := bL4.PerSelectorPolicies[k]
if !ok || !bV.Equal(v) {
return false
}
}
return true
}
return false
}
// ChangeState allows the caller to revert changes made by (multiple) toMapState call(s).
// All fields are maps so we can pass this by value.
type ChangeState struct {
Adds Keys // Added or modified keys, if not nil
Deletes Keys // deleted keys, if not nil
old mapStateMap // Old values of all modified or deleted keys, if not nil
}
// NewRevertState returns an empty ChangeState suitable for reverting MapState changes.
// The private 'old' field is initialized so that old state can be restored if need be.
func NewRevertState() ChangeState {
return ChangeState{
Adds: make(Keys),
old: make(mapStateMap),
}
}
func (c *ChangeState) Empty() bool {
return len(c.Adds)+len(c.Deletes)+len(c.old) == 0
}
// Size returns the total number of Adds minus
// the total number of true Deletes (Deletes
// that are not also in Adds). The return value
// can be negative.
func (c *ChangeState) Size() int {
deleteLen := 0
for k := range c.Deletes {
if _, ok := c.Adds[k]; !ok {
deleteLen++
}
}
return len(c.Adds) - deleteLen
}
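// Size sketch (with hypothetical keys k1..k3): a key present in both Adds and
// Deletes does not count as a true delete.
//
//	c := ChangeState{
//		Adds:    Keys{k1: {}, k2: {}},
//		Deletes: Keys{k2: {}, k3: {}},
//	}
//	_ = c.Size() // 2 adds - 1 true delete (k3) = 1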
// toMapState converts a single filter into MapState entries added to 'p.PolicyMapState'.
//
// Note: It is possible for two selectors to select the same security ID. To give priority to deny,
// AuthType, and L7 redirection (e.g., for visibility purposes), the mapstate entries are added to
// 'p.PolicyMapState' using insertWithChanges().
// Keys and old values of any added or deleted entries are added to 'changes'.
// 'redirects' is the map of currently realized redirects; it is used to find the proxy port for any redirects.
// p.SelectorCache is used as Identities interface during this call, which only has GetPrefix() that
// needs no lock.
func (l4 *L4Filter) toMapState(p *EndpointPolicy, features policyFeatures, changes ChangeState) {
port := l4.Port
proto := l4.U8Proto
direction := trafficdirection.Egress
if l4.Ingress {
direction = trafficdirection.Ingress
}
logger := log
if option.Config.Debug {
logger = log.WithFields(logrus.Fields{
logfields.EndpointID: p.PolicyOwner.GetID(),
logfields.Port: port,
logfields.PortName: l4.PortName,
logfields.Protocol: proto,
logfields.TrafficDirection: direction,
})
}
// resolve named port
if port == 0 && l4.PortName != "" {
port = p.PolicyOwner.GetNamedPort(l4.Ingress, l4.PortName, proto)
if port == 0 {
return // nothing to be done for undefined named port
}
}
var keysToAdd []Key
for _, mp := range PortRangeToMaskedPorts(port, l4.EndPort) {
keysToAdd = append(keysToAdd,
KeyForDirection(direction).WithPortProtoPrefix(proto, mp.port, uint8(bits.LeadingZeros16(^mp.mask))))
}
// find the L7 rules for the wildcard entry, if any
var wildcardRule *PerSelectorPolicy
if l4.wildcard != nil {
wildcardRule = l4.PerSelectorPolicies[l4.wildcard]
}
isL4Wildcard := (l4.Port != 0 || l4.PortName != "") && l4.wildcard != nil
for cs, currentRule := range l4.PerSelectorPolicies {
// have wildcard and this is an L3L4 key?
isL3L4withWildcardPresent := isL4Wildcard && cs != l4.wildcard
if isL3L4withWildcardPresent && wildcardRule.covers(currentRule) {
logger.WithField(logfields.EndpointSelector, cs).Debug("ToMapState: Skipping L3/L4 key due to existing L4-only key")
continue
}
isDenyRule := currentRule != nil && currentRule.IsDeny
isRedirect := currentRule.IsRedirect()
listener := currentRule.GetListener()
priority := currentRule.GetPriority()
if !isDenyRule && isL3L4withWildcardPresent && !isRedirect {
// Inherit the redirect status from the wildcard rule.
// This is now needed as 'covers()' can pass non-redirect L3L4 rules
// that must inherit the redirect status from the L4-only (== L3-wildcard)
// rule due to auth type on the L3L4 rule being different than in the
// L4-only rule.
isRedirect = wildcardRule.IsRedirect()
listener = wildcardRule.GetListener()
priority = wildcardRule.GetPriority()
}
authReq := currentRule.getAuthRequirement()
var proxyPort uint16
if isRedirect {
var err error
proxyPort, err = p.LookupRedirectPort(l4.Ingress, string(l4.Protocol), port, listener)
if err != nil {
// Skip unrealized redirects; this happens routinely just
// before new redirects are realized. Once created, we are called
// again.
logger.WithError(err).WithField(logfields.EndpointSelector, cs).Debugf("Skipping unrealized redirect")
continue
}
}
entry := newMapStateEntry(l4.RuleOrigin[cs], proxyPort, priority, isDenyRule, authReq)
if cs.IsWildcard() {
for _, keyToAdd := range keysToAdd {
keyToAdd.Identity = 0
p.policyMapState.insertWithChanges(keyToAdd, entry, features, changes)
if port == 0 {
// Allow-all
logger.WithField(logfields.EndpointSelector, cs).Debug("ToMapState: allow all")
} else {
// L4 allow
logger.WithField(logfields.EndpointSelector, cs).Debug("ToMapState: L4 allow all")
}
}
continue
}
idents := cs.GetSelections(p.VersionHandle)
if option.Config.Debug {
if isDenyRule {
logger.WithFields(logrus.Fields{
logfields.Version: p.VersionHandle,
logfields.EndpointSelector: cs,
logfields.PolicyID: idents,
}).Debug("ToMapState: Denied remote IDs")
} else {
logger.WithFields(logrus.Fields{
logfields.Version: p.VersionHandle,
logfields.EndpointSelector: cs,
logfields.PolicyID: idents,
}).Debug("ToMapState: Allowed remote IDs")
}
}
for _, id := range idents {
for _, keyToAdd := range keysToAdd {
keyToAdd.Identity = id
p.policyMapState.insertWithChanges(keyToAdd, entry, features, changes)
// If Cilium is in dual-stack mode then the "World" identity
// needs to be split into two identities to represent World
// IPv6 and IPv4 traffic distinctly from one another.
if id == identity.ReservedIdentityWorld && option.Config.IsDualStack() {
keyToAdd.Identity = identity.ReservedIdentityWorldIPv4
p.policyMapState.insertWithChanges(keyToAdd, entry, features, changes)
keyToAdd.Identity = identity.ReservedIdentityWorldIPv6
p.policyMapState.insertWithChanges(keyToAdd, entry, features, changes)
}
}
}
}
if option.Config.Debug {
log.WithFields(logrus.Fields{
logfields.PolicyKeysAdded: changes.Adds,
logfields.PolicyKeysDeleted: changes.Deletes,
logfields.PolicyEntriesOld: changes.old,
}).Debug("ToMapChange changes")
}
}
// IdentitySelectionUpdated implements CachedSelectionUser interface
// This call is made from a single goroutine in FIFO order to keep add
// and delete events ordered properly. No locks are held.
//
// The caller is responsible for making sure the same identity is not
// present in both 'added' and 'deleted'.
func (l4 *L4Filter) IdentitySelectionUpdated(cs types.CachedSelector, added, deleted []identity.NumericIdentity) {
log.WithFields(logrus.Fields{
logfields.EndpointSelector: cs,
logfields.AddedPolicyID: added,
logfields.DeletedPolicyID: deleted,
}).Debug("identities selected by L4Filter updated")
// Skip updates on wildcard selectors, as datapath and L7
// proxies do not need enumeration of all ids for L3 wildcard.
// This mirrors the per-selector logic in toMapState().
if cs.IsWildcard() {
return
}
// Push endpoint policy changes.
//
// `l4.policy` is nil when the filter is detached, so
// that we do not push updates to an unstable policy.
l4Policy := l4.policy.Load()
if l4Policy != nil {
l4Policy.AccumulateMapChanges(l4, cs, added, deleted)
}
}
func (l4 *L4Filter) IdentitySelectionCommit(txn *versioned.Tx) {
log.WithField(logfields.NewVersion, txn).Debug("identity selection updates done")
// Push endpoint policy incremental sync.
//
// `l4.policy` is nil when the filter is detached, so
// that we do not push updates to an unstable policy.
l4Policy := l4.policy.Load()
if l4Policy != nil {
l4Policy.SyncMapChanges(l4, txn)
}
}
func (l4 *L4Filter) IsPeerSelector() bool {
return true
}
func (l4 *L4Filter) cacheIdentitySelector(sel api.EndpointSelector, lbls stringLabels, selectorCache *SelectorCache) CachedSelector {
cs, added := selectorCache.AddIdentitySelector(l4, lbls, sel)
if added {
l4.PerSelectorPolicies[cs] = nil // no per-selector policy (yet)
}
return cs
}
func (l4 *L4Filter) cacheIdentitySelectors(selectors api.EndpointSelectorSlice, lbls stringLabels, selectorCache *SelectorCache) {
for _, sel := range selectors {
l4.cacheIdentitySelector(sel, lbls, selectorCache)
}
}
func (l4 *L4Filter) cacheFQDNSelectors(selectors api.FQDNSelectorSlice, lbls stringLabels, selectorCache *SelectorCache) {
for _, fqdnSel := range selectors {
l4.cacheFQDNSelector(fqdnSel, lbls, selectorCache)
}
}
func (l4 *L4Filter) cacheFQDNSelector(sel api.FQDNSelector, lbls stringLabels, selectorCache *SelectorCache) types.CachedSelector {
cs, added := selectorCache.AddFQDNSelector(l4, lbls, sel)
if added {
l4.PerSelectorPolicies[cs] = nil // no per-selector policy (yet)
}
return cs
}
// add L7 rules for all endpoints in the L7DataMap
func (l7 L7DataMap) addPolicyForSelector(rules *api.L7Rules, terminatingTLS, originatingTLS *TLSContext, auth *api.Authentication, deny bool, sni []string, listener string, priority uint8) {
isRedirect := !deny && (listener != "" || terminatingTLS != nil || originatingTLS != nil || len(sni) > 0 || !rules.IsEmpty())
for epsel := range l7 {
l7policy := &PerSelectorPolicy{
TerminatingTLS: terminatingTLS,
OriginatingTLS: originatingTLS,
Authentication: auth,
IsDeny: deny,
ServerNames: NewStringSet(sni),
isRedirect: isRedirect,
Listener: listener,
Priority: priority,
}
if rules != nil {
l7policy.L7Rules = *rules
}
l7[epsel] = l7policy
}
}
type TLSDirection string
const (
TerminatingTLS TLSDirection = "terminating"
OriginatingTLS TLSDirection = "originating"
)
// getCerts reads certificates out of the PolicyContext, reading from k8s or local files depending on config
// and puts the values into the relevant keys in the TLSContext. Note that if the returned TLSContext.FromFile is
// `false`, then the secret values will be read from Kubernetes.
func (l4 *L4Filter) getCerts(policyCtx PolicyContext, tls *api.TLSContext, direction TLSDirection) (*TLSContext, error) {
if tls == nil {
return nil, nil
}
ca, public, private, inlineSecrets, err := policyCtx.GetTLSContext(tls)
if err != nil {
log.WithError(err).Warningf("policy: Error getting %s TLS Context.", direction)
return nil, err
}
// If the secret is not being included into NPDS inline, we're going to pass an SDS reference instead.
if inlineSecrets {
switch direction {
case TerminatingTLS:
if public == "" || private == "" {
return nil, fmt.Errorf("Terminating TLS context is missing certs.")
}
case OriginatingTLS:
if ca == "" {
return nil, fmt.Errorf("Originating TLS context is missing CA certs.")
}
default:
return nil, fmt.Errorf("invalid TLS direction: %s", direction)
}
} else {
log.Debug("Secret being read from Kubernetes", "secret", k8sTypes.NamespacedName(*tls.Secret))
}
return &TLSContext{
TrustedCA: ca,
CertificateChain: public,
PrivateKey: private,
FromFile: inlineSecrets,
Secret: k8sTypes.NamespacedName(*tls.Secret),
}, nil
}
// createL4Filter creates a filter for L4 policy that applies to the specified
// endpoints and port/protocol, with reference to the original rules that the
// filter is derived from. This filter may be associated with a series of L7
// rules via the `rule` parameter.
// Not called with an empty peerEndpoints.
func createL4Filter(policyCtx PolicyContext, peerEndpoints api.EndpointSelectorSlice, auth *api.Authentication, rule api.Ports, port api.PortProtocol,
protocol api.L4Proto, ruleLabels stringLabels, ingress bool, fqdns api.FQDNSelectorSlice,
) (*L4Filter, error) {
selectorCache := policyCtx.GetSelectorCache()
portName := ""
p := uint64(0)
if iana.IsSvcName(port.Port) {
portName = port.Port
} else {
// already validated via PortRule.Validate()
p, _ = strconv.ParseUint(port.Port, 0, 16)
}
// already validated via L4Proto.Validate(), never "ANY"
// NOTE: "ANY" for wildcarded port/proto!
u8p, _ := u8proto.ParseProtocol(string(protocol))
l4 := &L4Filter{
Port: uint16(p), // 0 for L3-only rules and named ports
EndPort: uint16(port.EndPort), // 0 for a single port, >= 'Port' for a range
PortName: portName, // non-"" for named ports
Protocol: protocol,
U8Proto: u8p,
PerSelectorPolicies: make(L7DataMap),
RuleOrigin: make(map[CachedSelector]ruleOrigin), // Filled in below.
Ingress: ingress,
}
if peerEndpoints.SelectsAllEndpoints() {
l4.wildcard = l4.cacheIdentitySelector(api.WildcardEndpointSelector, ruleLabels, selectorCache)
} else {
l4.cacheIdentitySelectors(peerEndpoints, ruleLabels, selectorCache)
l4.cacheFQDNSelectors(fqdns, ruleLabels, selectorCache)
}
var terminatingTLS *TLSContext
var originatingTLS *TLSContext
var rules *api.L7Rules
var sni []string
listener := ""
var priority uint8
pr := rule.GetPortRule()
if pr != nil {
rules = pr.Rules
sni = pr.ServerNames
// Get TLS contexts, if any
var err error
terminatingTLS, err = l4.getCerts(policyCtx, pr.TerminatingTLS, TerminatingTLS)
if err != nil {
return nil, err
}
originatingTLS, err = l4.getCerts(policyCtx, pr.OriginatingTLS, OriginatingTLS)
if err != nil {
return nil, err
}
// Set parser type to TLS, if TLS. This will be overridden by L7 below, if rules
// exist.
if terminatingTLS != nil || originatingTLS != nil || len(pr.ServerNames) > 0 {
l4.L7Parser = ParserTypeTLS
}
// Determine L7ParserType from rules present. Earlier validation ensures rules
// for multiple protocols are not present here.
if rules != nil {
// we need this to redirect DNS UDP (or ANY, which is more useful)
if len(rules.DNS) > 0 {
l4.L7Parser = ParserTypeDNS
} else if protocol == api.ProtoTCP { // Other than DNS only support TCP
switch {
case len(rules.HTTP) > 0:
l4.L7Parser = ParserTypeHTTP
case len(rules.Kafka) > 0:
l4.L7Parser = ParserTypeKafka
case rules.L7Proto != "":
l4.L7Parser = (L7ParserType)(rules.L7Proto)
}
}
}
// Override the parser type to CRD if applicable.
if pr.Listener != nil {
l4.L7Parser = ParserTypeCRD
ns := policyCtx.GetNamespace()
resource := pr.Listener.EnvoyConfig
switch resource.Kind {
case "CiliumEnvoyConfig":
if ns == "" {
// Cluster-scoped CCNP tries to use namespaced
// CiliumEnvoyConfig
//
// TODO: Catch this in rule validation once we have a
// validation context in there so that we can differentiate
// between CNP and CCNP at validation time.
return nil, fmt.Errorf("Listener %q in CCNP can not use Kind CiliumEnvoyConfig", pr.Listener.Name)
}
case "CiliumClusterwideEnvoyConfig":
// CNP refers to a cluster-scoped listener
ns = ""
default:
}
listener, _ = api.ResourceQualifiedName(ns, resource.Name, pr.Listener.Name, api.ForceNamespace)
priority = pr.Listener.Priority
}
}
if l4.L7Parser != ParserTypeNone || auth != nil || policyCtx.IsDeny() {
l4.PerSelectorPolicies.addPolicyForSelector(rules, terminatingTLS, originatingTLS, auth, policyCtx.IsDeny(), sni, listener, priority)
}
origin := singleRuleOrigin(ruleLabels)
for cs := range l4.PerSelectorPolicies {
l4.RuleOrigin[cs] = origin
}
return l4, nil
}
func (l4 *L4Filter) removeSelectors(selectorCache *SelectorCache) {
selectors := make(types.CachedSelectorSlice, 0, len(l4.PerSelectorPolicies))
for cs := range l4.PerSelectorPolicies {
selectors = append(selectors, cs)
}
selectorCache.RemoveSelectors(selectors, l4)
}
// detach releases the references held in the L4Filter and must be called before
// the filter is left to be garbage collected.
// L4Filter may still be accessed concurrently after it has been detached.
func (l4 *L4Filter) detach(selectorCache *SelectorCache) {
l4.removeSelectors(selectorCache)
l4.policy.Store(nil)
}
// attach signifies that the L4Filter is ready and reachable for updates
// from SelectorCache. L4Filter (and L4Policy) is read-only after this is called,
// multiple goroutines will be reading the fields from that point on.
func (l4 *L4Filter) attach(ctx PolicyContext, l4Policy *L4Policy) policyFeatures {
var features policyFeatures
for cs, cp := range l4.PerSelectorPolicies {
if cp != nil {
if cp.isRedirect {
features.setFeature(redirectRules)
}
if cp.IsDeny {
features.setFeature(denyRules)
}
explicit, authType := getAuthType(cp.Authentication)
if explicit {
features.setFeature(authRules)
if authType != types.AuthTypeDisabled {
if l4Policy.authMap == nil {
l4Policy.authMap = make(authMap, 1)
}
authTypes := l4Policy.authMap[cs]
if authTypes == nil {
authTypes = make(AuthTypes, 1)
}
authTypes[authType] = struct{}{}
l4Policy.authMap[cs] = authTypes
}
}
// Compute Envoy policies when a policy is ready to be used
if len(cp.L7Rules.HTTP) > 0 {
cp.EnvoyHTTPRules, cp.CanShortCircuit = ctx.GetEnvoyHTTPRules(&cp.L7Rules)
}
}
}
l4.policy.Store(l4Policy)
return features
}
// createL4IngressFilter creates a filter for L4 policy that applies to the
// specified endpoints and port/protocol for ingress traffic, with reference
// to the original rules that the filter is derived from. This filter may be
// associated with a series of L7 rules via the `rule` parameter.
//
// hostWildcardL7 determines if L7 traffic from Host should be
// wildcarded (in the relevant daemon mode).
func createL4IngressFilter(policyCtx PolicyContext, fromEndpoints api.EndpointSelectorSlice, auth *api.Authentication, hostWildcardL7 []string, rule api.Ports, port api.PortProtocol,
protocol api.L4Proto, ruleLabels stringLabels,
) (*L4Filter, error) {
filter, err := createL4Filter(policyCtx, fromEndpoints, auth, rule, port, protocol, ruleLabels, true, nil)
if err != nil {
return nil, err
}
// If the filter would apply proxy redirection for the Host, but we should accept
// everything from the host, then wildcard the Host at L7.
if len(hostWildcardL7) > 0 {
for cs, l7 := range filter.PerSelectorPolicies {
if l7.IsRedirect() && cs.Selects(versioned.Latest(), identity.ReservedIdentityHost) {
for _, name := range hostWildcardL7 {
selector := api.ReservedEndpointSelectors[name]
filter.cacheIdentitySelector(selector, ruleLabels, policyCtx.GetSelectorCache())
}
}
}
}
return filter, nil
}
// createL4EgressFilter creates a filter for L4 policy that applies to the
// specified endpoints and port/protocol for egress traffic, with reference
// to the original rules that the filter is derived from. This filter may be
// associated with a series of L7 rules via the `rule` parameter.
func createL4EgressFilter(policyCtx PolicyContext, toEndpoints api.EndpointSelectorSlice, auth *api.Authentication, rule api.Ports, port api.PortProtocol,
protocol api.L4Proto, ruleLabels stringLabels, fqdns api.FQDNSelectorSlice,
) (*L4Filter, error) {
return createL4Filter(policyCtx, toEndpoints, auth, rule, port, protocol, ruleLabels, false, fqdns)
}
// redirectType returns the redirectType for this filter
func (l4 *L4Filter) redirectType() redirectTypes {
switch l4.L7Parser {
case ParserTypeNone:
return redirectTypeNone
case ParserTypeDNS:
return redirectTypeDNS
case ParserTypeHTTP, ParserTypeTLS, ParserTypeCRD:
return redirectTypeEnvoy
default:
// all other (non-empty) values are used for proxylib redirects
return redirectTypeProxylib
}
}
// IsRedirect returns true if the L4 filter contains a port redirection
func (l4 *L4Filter) IsRedirect() bool {
return l4.L7Parser != ParserTypeNone
}
// Marshal returns the `L4Filter` in a JSON string.
func (l4 *L4Filter) Marshal() string {
b, err := json.Marshal(l4)
if err != nil {
b = []byte("\"L4Filter error: " + err.Error() + "\"")
}
return string(b)
}
// String returns the `L4Filter` in a human-readable string.
func (l4 *L4Filter) String() string {
b, err := json.Marshal(l4)
if err != nil {
return err.Error()
}
return string(b)
}
// Note: Only used for policy tracing
func (l4 *L4Filter) matchesLabels(labels labels.LabelArray) (bool, bool) {
if l4.wildcard != nil {
perSelectorPolicy := l4.PerSelectorPolicies[l4.wildcard]
isDeny := perSelectorPolicy != nil && perSelectorPolicy.IsDeny
return true, isDeny
} else if len(labels) == 0 {
return false, false
}
var selected bool
for sel, rule := range l4.PerSelectorPolicies {
// slow, but OK for tracing
idSel := sel.(*identitySelector)
if lis, ok := idSel.source.(*labelIdentitySelector); ok && lis.xxxMatches(labels) {
isDeny := rule != nil && rule.IsDeny
selected = true
if isDeny {
return true, isDeny
}
}
}
return selected, false
}
// addL4Filter adds 'filterToMerge' into the 'resMap'. Returns an error if
// 'filterToMerge' can't be merged with an existing filter for the same
// port and proto.
func addL4Filter(policyCtx PolicyContext,
ctx *SearchContext, resMap L4PolicyMap,
p api.PortProtocol, proto api.L4Proto,
filterToMerge *L4Filter,
) error {
existingFilter := resMap.ExactLookup(p.Port, uint16(p.EndPort), string(proto))
if existingFilter == nil {
resMap.Upsert(p.Port, uint16(p.EndPort), string(proto), filterToMerge)
return nil
}
selectorCache := policyCtx.GetSelectorCache()
if err := mergePortProto(ctx, existingFilter, filterToMerge, selectorCache); err != nil {
filterToMerge.detach(selectorCache)
return err
}
// To keep the rule origin tracking correct, merge the rule label arrays for each CachedSelector
// we know about. New CachedSelectors are added.
for cs, newLabels := range filterToMerge.RuleOrigin {
if existingLabels, ok := existingFilter.RuleOrigin[cs]; ok {
if changed := existingLabels.Merge(newLabels); changed {
existingFilter.RuleOrigin[cs] = existingLabels
}
} else {
existingFilter.RuleOrigin[cs] = newLabels
}
}
resMap.Upsert(p.Port, uint16(p.EndPort), string(proto), existingFilter)
return nil
}
// L4PolicyMap is a list of L4 filters indexable by port/endport/protocol
type L4PolicyMap interface {
Upsert(port string, endPort uint16, protocol string, l4 *L4Filter)
Delete(port string, endPort uint16, protocol string)
ExactLookup(port string, endPort uint16, protocol string) *L4Filter
MatchesLabels(port, protocol string, labels labels.LabelArray) (match, isDeny bool)
Detach(selectorCache *SelectorCache)
IngressCoversContext(ctx *SearchContext) api.Decision
EgressCoversContext(ctx *SearchContext) api.Decision
ForEach(func(l4 *L4Filter) bool)
TestingOnlyEquals(bMap L4PolicyMap) bool
TestingOnlyDiff(expectedMap L4PolicyMap) string
Len() int
}
// NewL4PolicyMap creates a new L4PolicyMap.
func NewL4PolicyMap() L4PolicyMap {
return &l4PolicyMap{
namedPortMap: make(map[string]*L4Filter),
rangePortMap: make(map[portProtoKey]*L4Filter),
rangePortIndex: bitlpm.NewUintTrie[uint32, map[portProtoKey]struct{}](),
}
}
// NewL4PolicyMapWithValues creates a new L4PolicyMap, with an initial
// set of values. The initMap argument does not support port ranges.
func NewL4PolicyMapWithValues(initMap map[string]*L4Filter) L4PolicyMap {
l4M := &l4PolicyMap{
namedPortMap: make(map[string]*L4Filter),
rangePortMap: make(map[portProtoKey]*L4Filter),
rangePortIndex: bitlpm.NewUintTrie[uint32, map[portProtoKey]struct{}](),
}
for k, v := range initMap {
portProtoSlice := strings.Split(k, "/")
if len(portProtoSlice) < 2 {
continue
}
l4M.Upsert(portProtoSlice[0], 0, portProtoSlice[1], v)
}
return l4M
}
type portProtoKey struct {
port, endPort uint16
proto uint8
}
// l4PolicyMap is the implementation of L4PolicyMap
type l4PolicyMap struct {
// namedPortMap represents the named ports (a Kubernetes feature)
// that map to an L4Filter. They must be tracked at the selection
// level, because they can only be resolved at the endpoint/identity
// level. Named ports cannot have ranges.
namedPortMap map[string]*L4Filter
// rangePortMap is a map of all L4Filters indexed by their port-
// protocol.
rangePortMap map[portProtoKey]*L4Filter
// rangePortIndex is an index of all L4Filters so that
// L4Filters that have overlapping port ranges can be looked up
// with a single port.
rangePortIndex *bitlpm.UintTrie[uint32, map[portProtoKey]struct{}]
}
func parsePortProtocol(port, protocol string) (uint16, uint8) {
// These string values have been validated many times
// over at this point.
prt, _ := strconv.ParseUint(port, 10, 16)
proto, _ := u8proto.ParseProtocol(protocol)
return uint16(prt), uint8(proto)
}
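// For example (illustrative only), assuming u8proto maps "TCP" to the IANA
// protocol number 6:
//
//    port, proto := parsePortProtocol("80", "TCP") // port == 80, proto == 6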
// makePolicyMapKey creates a protocol-port uint32 with the
// upper 16 bits containing the protocol and the lower 16
// bits containing the masked port.
func makePolicyMapKey(port, mask uint16, proto uint8) uint32 {
return (uint32(proto) << 16) | uint32(port&mask)
}
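// For example (illustrative values only): with TCP as protocol number 6, a
// fully-masked port 80 encodes as
//
//    makePolicyMapKey(80, 0xffff, 6) // == 0x00060050, i.e. 6<<16 | 80
//
// while a coarser mask such as 0xfffc keeps only the masked port bits in the
// lower 16 bits of the key.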
// Upsert adds an L4Filter to the map, indexed by protocol/port-endPort.
func (l4M *l4PolicyMap) Upsert(port string, endPort uint16, protocol string, l4 *L4Filter) {
if iana.IsSvcName(port) {
l4M.namedPortMap[port+"/"+protocol] = l4
return
}
portU, protoU := parsePortProtocol(port, protocol)
ppK := portProtoKey{
port: portU,
endPort: endPort,
proto: protoU,
}
_, indexExists := l4M.rangePortMap[ppK]
l4M.rangePortMap[ppK] = l4
// We do not need to reindex a key that already exists,
// even if the filter changed.
if !indexExists {
for _, mp := range PortRangeToMaskedPorts(portU, endPort) {
k := makePolicyMapKey(mp.port, mp.mask, protoU)
prefix := 32 - uint(bits.TrailingZeros16(mp.mask))
portProtoSet, ok := l4M.rangePortIndex.ExactLookup(prefix, k)
if !ok {
portProtoSet = make(map[portProtoKey]struct{})
l4M.rangePortIndex.Upsert(prefix, k, portProtoSet)
}
portProtoSet[ppK] = struct{}{}
}
}
}
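// As an illustrative sketch, assuming PortRangeToMaskedPorts performs the
// usual power-of-two decomposition: upserting the TCP range 80-83 yields the
// single masked port {port: 80, mask: 0xfffc}, so the filter is indexed under
//
//    k := makePolicyMapKey(80, 0xfffc, 6)              // 6<<16 | 80
//    prefix := 32 - uint(bits.TrailingZeros16(0xfffc)) // 30
//
// and portProtoKey{port: 80, endPort: 83, proto: 6} is added to the trie
// entry's set at that prefix/key.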
// Delete removes an L4Filter from the map by protocol/port-endPort.
func (l4M *l4PolicyMap) Delete(port string, endPort uint16, protocol string) {
if iana.IsSvcName(port) {
delete(l4M.namedPortMap, port+"/"+protocol)
return
}
portU, protoU := parsePortProtocol(port, protocol)
ppK := portProtoKey{
port: portU,
endPort: endPort,
proto: protoU,
}
_, indexExists := l4M.rangePortMap[ppK]
delete(l4M.rangePortMap, ppK)
// Only delete the index if the key exists.
if indexExists {
for _, mp := range PortRangeToMaskedPorts(portU, endPort) {
k := makePolicyMapKey(mp.port, mp.mask, protoU)
prefix := 32 - uint(bits.TrailingZeros16(mp.mask))
portProtoSet, ok := l4M.rangePortIndex.ExactLookup(prefix, k)
if !ok {
return
}
delete(portProtoSet, ppK)
if len(portProtoSet) == 0 {
l4M.rangePortIndex.Delete(prefix, k)
}
}
}
}
// ExactLookup returns the L4Filter that exactly matches the given protocol/port-endPort, or nil if none exists.
func (l4M *l4PolicyMap) ExactLookup(port string, endPort uint16, protocol string) *L4Filter {
if iana.IsSvcName(port) {
return l4M.namedPortMap[port+"/"+protocol]
}
portU, protoU := parsePortProtocol(port, protocol)
ppK := portProtoKey{
port: portU,
endPort: endPort,
proto: protoU,
}
return l4M.rangePortMap[ppK]
}
// MatchesLabels checks if a given port, protocol, and set of labels match
// any rule in the L4PolicyMap.
func (l4M *l4PolicyMap) MatchesLabels(port, protocol string, labels labels.LabelArray) (match, isDeny bool) {
if iana.IsSvcName(port) {
l4 := l4M.namedPortMap[port+"/"+protocol]
if l4 != nil {
return l4.matchesLabels(labels)
}
return
}
portU, protoU := parsePortProtocol(port, protocol)
l4PortProtoKeys := make(map[portProtoKey]struct{})
l4M.rangePortIndex.Ancestors(32, makePolicyMapKey(portU, 0xffff, protoU),
func(_ uint, _ uint32, portProtoSet map[portProtoKey]struct{}) bool {
for k := range portProtoSet {
v, ok := l4M.rangePortMap[k]
if ok {
if _, ok := l4PortProtoKeys[k]; !ok {
match, isDeny = v.matchesLabels(labels)
if isDeny {
return false
}
}
}
}
return true
})
return
}
// ForEach iterates over all L4Filters in the l4PolicyMap.
func (l4M *l4PolicyMap) ForEach(fn func(l4 *L4Filter) bool) {
for _, f := range l4M.namedPortMap {
if !fn(f) {
return
}
}
for _, v := range l4M.rangePortMap {
if !fn(v) {
return
}
}
}
// TestingOnlyEquals returns true if both L4PolicyMaps are equal.
func (l4M *l4PolicyMap) TestingOnlyEquals(bMap L4PolicyMap) bool {
if l4M.Len() != bMap.Len() {
return false
}
equal := true
l4M.ForEach(func(l4 *L4Filter) bool {
port := l4.PortName
if len(port) == 0 {
port = fmt.Sprintf("%d", l4.Port)
}
l4B := bMap.ExactLookup(port, l4.EndPort, string(l4.Protocol))
equal = l4.Equals(l4B)
return equal
})
return equal
}
// TestingOnlyDiff returns the difference between two L4PolicyMaps.
func (l4M *l4PolicyMap) TestingOnlyDiff(expected L4PolicyMap) (res string) {
res += "Missing (-), Unexpected (+):\n"
expected.ForEach(func(eV *L4Filter) bool {
port := eV.PortName
if len(port) == 0 {
port = fmt.Sprintf("%d", eV.Port)
}
oV := l4M.ExactLookup(port, eV.EndPort, string(eV.Protocol))
if oV != nil {
if !eV.Equals(oV) {
res += "- " + eV.String() + "\n"
res += "+ " + oV.String() + "\n"
}
} else {
res += "- " + eV.String() + "\n"
}
return true
})
l4M.ForEach(func(oV *L4Filter) bool {
port := oV.PortName
if len(port) == 0 {
port = fmt.Sprintf("%d", oV.Port)
}
eV := expected.ExactLookup(port, oV.EndPort, string(oV.Protocol))
if eV == nil {
res += "+ " + oV.String() + "\n"
}
return true
})
return
}
// Len returns the number of entries in the map.
func (l4M *l4PolicyMap) Len() int {
if l4M == nil {
return 0
}
return len(l4M.namedPortMap) + len(l4M.rangePortMap)
}
type policyFeatures uint8
const (
denyRules policyFeatures = 1 << iota
redirectRules
authRules
allFeatures policyFeatures = ^policyFeatures(0)
)
func (pf *policyFeatures) setFeature(feature policyFeatures) {
*pf |= feature
}
func (pf policyFeatures) contains(feature policyFeatures) bool {
return pf&feature != 0
}
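// For example (illustrative only):
//
//    var pf policyFeatures
//    pf.setFeature(denyRules)
//    pf.contains(denyRules)          // true
//    pf.contains(redirectRules)      // false
//    allFeatures.contains(authRules) // true: allFeatures has every bit set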
type L4DirectionPolicy struct {
PortRules L4PolicyMap
// features tracks properties of PortRules to skip code when features are not used
features policyFeatures
}
func newL4DirectionPolicy() L4DirectionPolicy {
return L4DirectionPolicy{
PortRules: NewL4PolicyMap(),
}
}
// Detach removes the cached selectors held by L4PolicyMap from the
// selectorCache, allowing the map to be garbage collected when there
// are no more references to it.
func (l4 L4DirectionPolicy) Detach(selectorCache *SelectorCache) {
l4.PortRules.Detach(selectorCache)
}
// Detach is used directly from tracing and testing functions
func (l4M *l4PolicyMap) Detach(selectorCache *SelectorCache) {
l4M.ForEach(func(l4 *L4Filter) bool {
l4.detach(selectorCache)
return true
})
}
// attach makes all the L4Filters point back to the L4Policy that contains them.
// This is done before the L4PolicyMap is exposed to concurrent access.
// Returns the bitmask of all redirect types for this policymap.
func (l4 *L4DirectionPolicy) attach(ctx PolicyContext, l4Policy *L4Policy) redirectTypes {
var redirectTypes redirectTypes
var features policyFeatures
l4.PortRules.ForEach(func(f *L4Filter) bool {
features |= f.attach(ctx, l4Policy)
redirectTypes |= f.redirectType()
return true
})
l4.features = features
return redirectTypes
}
// containsAllL3L4 checks if the L4PolicyMap contains all L4 ports in `ports`.
// For L4Filters that specify ToEndpoints or FromEndpoints, uses `labels` to
// determine whether the policy allows L4 communication between the corresponding
// endpoints.
// Returns api.Denied in the following conditions:
// - If a single port is not present in the `L4PolicyMap` and is not allowed
// by the distilled L3 policy
// - If a port is present in the `L4PolicyMap`, but it applies ToEndpoints or
// FromEndpoints constraints that require labels not present in `labels`.
//
// Otherwise, returns api.Allowed.
//
// Note: Only used for policy tracing
func (l4M *l4PolicyMap) containsAllL3L4(labels labels.LabelArray, ports []*models.Port) api.Decision {
if l4M.Len() == 0 {
return api.Allowed
}
// Check L3-only filters first.
filter := l4M.ExactLookup("0", 0, "ANY")
if filter != nil {
matches, isDeny := filter.matchesLabels(labels)
switch {
case matches && isDeny:
return api.Denied
case matches:
return api.Allowed
}
}
for _, l4Ctx := range ports {
portStr := l4Ctx.Name
if !iana.IsSvcName(portStr) {
portStr = strconv.FormatUint(uint64(l4Ctx.Port), 10)
}
lwrProtocol := l4Ctx.Protocol
switch lwrProtocol {
case "", models.PortProtocolANY:
tcpmatch, isTCPDeny := l4M.MatchesLabels(portStr, "TCP", labels)
udpmatch, isUDPDeny := l4M.MatchesLabels(portStr, "UDP", labels)
sctpmatch, isSCTPDeny := l4M.MatchesLabels(portStr, "SCTP", labels)
if (!tcpmatch && !udpmatch && !sctpmatch) || (isTCPDeny && isUDPDeny && isSCTPDeny) {
return api.Denied
}
default:
matches, isDeny := l4M.MatchesLabels(portStr, lwrProtocol, labels)
if !matches || isDeny {
return api.Denied
}
}
}
return api.Allowed
}
type L4Policy struct {
Ingress L4DirectionPolicy
Egress L4DirectionPolicy
authMap authMap
// Revision is the repository revision used to generate this policy.
Revision uint64
// redirectTypes is a bitmap containing the types of redirect contained by this policy. It
// is computed after the policy maps have been populated to avoid scanning them
// repeatedly when using the L4Policy.
redirectTypes redirectTypes
// Endpoint policies using this L4Policy
// These are circular references, cleaned up in Detach()
// This mutex is taken while Endpoint mutex is held, so Endpoint lock
// MUST always be taken before this mutex.
mutex lock.RWMutex
users map[*EndpointPolicy]struct{}
}
// NewL4Policy creates a new L4Policy
func NewL4Policy(revision uint64) L4Policy {
return L4Policy{
Ingress: newL4DirectionPolicy(),
Egress: newL4DirectionPolicy(),
Revision: revision,
users: make(map[*EndpointPolicy]struct{}),
}
}
// insertUser adds a user to the L4Policy so that incremental
// updates of the L4Policy may be forwarded to the users of it.
// May not call into SelectorCache, as SelectorCache is locked during this call.
func (l4 *L4Policy) insertUser(user *EndpointPolicy) {
l4.mutex.Lock()
// 'users' is set to nil when the policy is detached. This
// happens to the old policy when it is being replaced with a
// new one, or when the last endpoint using this policy is
// removed.
// In the case of a policy update it is possible that an
// endpoint has started regeneration before the policy was
// updated, and that the policy was updated before the said
// endpoint reached this point. In this case the endpoint's
// policy is going to be recomputed soon after and we do
// nothing here.
if l4.users != nil {
l4.users[user] = struct{}{}
}
l4.mutex.Unlock()
}
// removeUser removes a user that no longer needs incremental updates
// from the L4Policy.
func (l4 *L4Policy) removeUser(user *EndpointPolicy) {
// 'users' is set to nil when the policy is detached. This
// happens to the old policy when it is being replaced with a
// new one, or when the last endpoint using this policy is
// removed.
l4.mutex.Lock()
if l4.users != nil {
delete(l4.users, user)
}
l4.mutex.Unlock()
}
// AccumulateMapChanges distributes the given changes to the registered users.
//
// The caller is responsible for making sure the same identity is not
// present in both 'adds' and 'deletes'.
func (l4Policy *L4Policy) AccumulateMapChanges(l4 *L4Filter, cs CachedSelector, adds, deletes []identity.NumericIdentity) {
port := uint16(l4.Port)
proto := l4.U8Proto
derivedFrom := l4.RuleOrigin[cs]
direction := trafficdirection.Egress
if l4.Ingress {
direction = trafficdirection.Ingress
}
perSelectorPolicy := l4.PerSelectorPolicies[cs]
redirect := perSelectorPolicy.IsRedirect()
listener := perSelectorPolicy.GetListener()
priority := perSelectorPolicy.GetPriority()
authReq := perSelectorPolicy.getAuthRequirement()
isDeny := perSelectorPolicy != nil && perSelectorPolicy.IsDeny
// Can hold rlock here as neither GetNamedPort() nor LookupRedirectPort() below
// takes the Endpoint lock anymore.
// SelectorCache may not be called into while holding this lock!
l4Policy.mutex.RLock()
defer l4Policy.mutex.RUnlock()
for epPolicy := range l4Policy.users {
// resolve named port
if port == 0 && l4.PortName != "" {
port = epPolicy.PolicyOwner.GetNamedPort(l4.Ingress, l4.PortName, proto)
if port == 0 {
continue
}
}
var proxyPort uint16
if redirect {
var err error
proxyPort, err = epPolicy.LookupRedirectPort(l4.Ingress, string(l4.Protocol), port, listener)
if err != nil {
log.WithFields(logrus.Fields{
logfields.EndpointSelector: cs,
logfields.Port: port,
logfields.Protocol: proto,
logfields.TrafficDirection: direction,
logfields.IsRedirect: redirect,
logfields.Listener: listener,
logfields.ListenerPriority: priority,
}).Warn("AccumulateMapChanges: Missing redirect.")
continue
}
}
var keysToAdd []Key
for _, mp := range PortRangeToMaskedPorts(port, l4.EndPort) {
keysToAdd = append(keysToAdd,
KeyForDirection(direction).WithPortProtoPrefix(proto, mp.port, uint8(bits.LeadingZeros16(^mp.mask))))
}
value := newMapStateEntry(derivedFrom, proxyPort, priority, isDeny, authReq)
if option.Config.Debug {
authString := "default"
if authReq.IsExplicit() {
authString = authReq.AuthType().String()
}
log.WithFields(logrus.Fields{
logfields.EndpointSelector: cs,
logfields.AddedPolicyID: adds,
logfields.DeletedPolicyID: deletes,
logfields.Port: port,
logfields.Protocol: proto,
logfields.TrafficDirection: direction,
logfields.IsRedirect: redirect,
logfields.AuthType: authString,
logfields.Listener: listener,
logfields.ListenerPriority: priority,
}).Debug("AccumulateMapChanges")
}
epPolicy.policyMapChanges.AccumulateMapChanges(adds, deletes, keysToAdd, value)
}
}
// SyncMapChanges marks earlier updates as completed
func (l4Policy *L4Policy) SyncMapChanges(l4 *L4Filter, txn *versioned.Tx) {
// SelectorCache may not be called into while holding this lock!
l4Policy.mutex.RLock()
for epPolicy := range l4Policy.users {
epPolicy.policyMapChanges.SyncMapChanges(txn)
}
l4Policy.mutex.RUnlock()
}
// Detach makes the L4Policy ready for garbage collection, removing
// circular pointer references.
// Note that the L4Policy itself is not modified in any way, so that it may still
// be used concurrently.
func (l4 *L4Policy) Detach(selectorCache *SelectorCache) {
l4.Ingress.Detach(selectorCache)
l4.Egress.Detach(selectorCache)
l4.mutex.Lock()
l4.users = nil
l4.mutex.Unlock()
}
// Attach makes all the L4Filters point back to the L4Policy that contains them.
// This is done before the L4Policy is exposed to concurrent access.
func (l4 *L4Policy) Attach(ctx PolicyContext) {
ingressRedirects := l4.Ingress.attach(ctx, l4)
egressRedirects := l4.Egress.attach(ctx, l4)
l4.redirectTypes = ingressRedirects | egressRedirects
}
// IngressCoversContext checks if the receiver's L4PolicyMap allows all of
// `ctx.DPorts` for peers matching the `ctx.From` labels.
//
// Note: Only used for policy tracing
func (l4M *l4PolicyMap) IngressCoversContext(ctx *SearchContext) api.Decision {
return l4M.containsAllL3L4(ctx.From, ctx.DPorts)
}
// EgressCoversContext checks if the receiver's L4PolicyMap allows all of
// `ctx.DPorts` for peers matching the `ctx.To` labels.
//
// Note: Only used for policy tracing
func (l4M *l4PolicyMap) EgressCoversContext(ctx *SearchContext) api.Decision {
return l4M.containsAllL3L4(ctx.To, ctx.DPorts)
}
// HasRedirect returns true if the L4 policy contains at least one port redirection
func (l4 *L4Policy) HasRedirect() bool {
return l4 != nil && l4.redirectTypes != redirectTypeNone
}
// HasEnvoyRedirect returns true if the L4 policy contains at least one port redirection to Envoy
func (l4 *L4Policy) HasEnvoyRedirect() bool {
return l4 != nil && l4.redirectTypes&redirectTypeEnvoy == redirectTypeEnvoy
}
// HasProxylibRedirect returns true if the L4 policy contains at least one port redirection to Proxylib
func (l4 *L4Policy) HasProxylibRedirect() bool {
return l4 != nil && l4.redirectTypes&redirectTypeProxylib == redirectTypeProxylib
}
func (l4 *L4Policy) GetModel() *models.L4Policy {
if l4 == nil {
return nil
}
ingress := []*models.PolicyRule{}
l4.Ingress.PortRules.ForEach(func(v *L4Filter) bool {
rulesBySelector := map[string][][]string{}
derivedFrom := labels.LabelArrayList{}
for sel, rules := range v.RuleOrigin {
lal := rules.GetLabelArrayList()
derivedFrom.MergeSorted(lal)
rulesBySelector[sel.String()] = lal.GetModel()
}
ingress = append(ingress, &models.PolicyRule{
Rule: v.Marshal(),
DerivedFromRules: derivedFrom.GetModel(),
RulesBySelector: rulesBySelector,
})
return true
})
egress := []*models.PolicyRule{}
l4.Egress.PortRules.ForEach(func(v *L4Filter) bool {
// TODO: Add RulesBySelector field like for ingress above?
derivedFrom := labels.LabelArrayList{}
for _, rules := range v.RuleOrigin {
lal := rules.GetLabelArrayList()
derivedFrom.MergeSorted(lal)
}
egress = append(egress, &models.PolicyRule{
Rule: v.Marshal(),
DerivedFromRules: derivedFrom.GetModel(),
})
return true
})
return &models.L4Policy{
Ingress: ingress,
Egress: egress,
}
}
// ProxyPolicy is any type which encodes state needed to redirect to an L7
// proxy.
type ProxyPolicy interface {
GetPerSelectorPolicies() L7DataMap
GetL7Parser() L7ParserType
GetIngress() bool
GetPort() uint16
GetProtocol() u8proto.U8proto
GetListener() string
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"bytes"
"fmt"
stdlog "log"
"sync"
"testing"
cilium "github.com/cilium/proxy/go/cilium/api"
"github.com/cilium/proxy/pkg/policy/api/kafka"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/types"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/fqdn/re"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
testpolicy "github.com/cilium/cilium/pkg/testutils/policy"
)
var (
hostSelector = api.ReservedEndpointSelectors[labels.IDNameHost]
toFoo = &SearchContext{To: labels.ParseSelectLabelArray("foo")}
dummySelectorCacheUser = &testpolicy.DummySelectorCacheUser{}
fooSelector = api.NewESFromLabels(labels.ParseSelectLabel("foo"))
bazSelector = api.NewESFromLabels(labels.ParseSelectLabel("baz"))
selFoo = api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
selBar1 = api.NewESFromLabels(labels.ParseSelectLabel("id=bar1"))
selBar2 = api.NewESFromLabels(labels.ParseSelectLabel("id=bar2"))
)
type testData struct {
sc *SelectorCache
repo *Repository
testPolicyContext *testPolicyContextType
cachedSelectorA CachedSelector
cachedSelectorB CachedSelector
cachedSelectorC CachedSelector
cachedSelectorHost CachedSelector
wildcardCachedSelector CachedSelector
cachedFooSelector CachedSelector
cachedBazSelector CachedSelector
cachedSelectorBar1 CachedSelector
cachedSelectorBar2 CachedSelector
}
func newTestData() *testData {
td := &testData{
sc: testNewSelectorCache(nil),
repo: NewPolicyRepository(nil, nil, nil, nil, api.NewPolicyMetricsNoop()),
testPolicyContext: &testPolicyContextType{},
}
td.testPolicyContext.sc = td.sc
td.repo.selectorCache = td.sc
td.wildcardCachedSelector, _ = td.sc.AddIdentitySelector(dummySelectorCacheUser, EmptyStringLabels, api.WildcardEndpointSelector)
td.cachedSelectorA, _ = td.sc.AddIdentitySelector(dummySelectorCacheUser, EmptyStringLabels, endpointSelectorA)
td.cachedSelectorB, _ = td.sc.AddIdentitySelector(dummySelectorCacheUser, EmptyStringLabels, endpointSelectorB)
td.cachedSelectorC, _ = td.sc.AddIdentitySelector(dummySelectorCacheUser, EmptyStringLabels, endpointSelectorC)
td.cachedSelectorHost, _ = td.sc.AddIdentitySelector(dummySelectorCacheUser, EmptyStringLabels, hostSelector)
td.cachedFooSelector, _ = td.sc.AddIdentitySelector(dummySelectorCacheUser, EmptyStringLabels, fooSelector)
td.cachedBazSelector, _ = td.sc.AddIdentitySelector(dummySelectorCacheUser, EmptyStringLabels, bazSelector)
td.cachedSelectorBar1, _ = td.sc.AddIdentitySelector(dummySelectorCacheUser, EmptyStringLabels, selBar1)
td.cachedSelectorBar2, _ = td.sc.AddIdentitySelector(dummySelectorCacheUser, EmptyStringLabels, selBar2)
return td
}
// withIDs loads the set of IDs into the SelectorCache. Returns
// the same testData for easy chaining.
func (td *testData) withIDs(initIDs ...identity.IdentityMap) *testData {
initial := identity.IdentityMap{}
for _, im := range initIDs {
for id, lbls := range im {
initial[id] = lbls
}
}
wg := &sync.WaitGroup{}
td.sc.UpdateIdentities(initial, nil, wg)
wg.Wait()
return td
}
// resetRepo clears only the policy repository.
// Some tests rely on the accumulated selector cache state but need a clean repo.
func (td *testData) resetRepo() *Repository {
td.repo.ReplaceByLabels(nil, []labels.LabelArray{{}})
return td.repo
}
func (td *testData) addIdentity(id *identity.Identity) {
wg := &sync.WaitGroup{}
td.sc.UpdateIdentities(
identity.IdentityMap{
id.ID: id.LabelArray,
}, nil, wg)
wg.Wait()
}
// testPolicyContextType is a dummy context used when evaluating rules.
type testPolicyContextType struct {
isDeny bool
ns string
sc *SelectorCache
fromFile bool
}
func (p *testPolicyContextType) GetNamespace() string {
return p.ns
}
func (p *testPolicyContextType) GetSelectorCache() *SelectorCache {
return p.sc
}
func (p *testPolicyContextType) GetTLSContext(tls *api.TLSContext) (ca, public, private string, fromFile bool, err error) {
switch tls.Secret.Name {
case "tls-cert":
return "", "fake public cert", "fake private key", p.fromFile, nil
case "tls-ca-certs":
return "fake CA certs", "", "", p.fromFile, nil
}
return "", "", "", p.fromFile, fmt.Errorf("Unknown test secret '%s'", tls.Secret.Name)
}
func (p *testPolicyContextType) GetEnvoyHTTPRules(*api.L7Rules) (*cilium.HttpNetworkPolicyRules, bool) {
return nil, true
}
func (p *testPolicyContextType) SetDeny(isDeny bool) bool {
oldDeny := p.isDeny
p.isDeny = isDeny
return oldDeny
}
func (p *testPolicyContextType) IsDeny() bool {
return p.isDeny
}
func init() {
re.InitRegexCompileLRU(defaults.FQDNRegexCompileLRUSize)
}
// Tests in this file:
//
// How to read this table:
// Case: The test / subtest number.
// L3: Matches at L3 for rule 1, followed by rule 2.
// L4: Matches at L4.
// L7: Rules at L7 for rule 1, followed by rule 2.
// Notes: Extra information about the test.
//
// +-----+-----------------+----------+-----------------+------------------------------------------------------+
// |Case | L3 (1, 2) match | L4 match | L7 match (1, 2) | Notes |
// +=====+=================+==========+=================+======================================================+
// | 1A | *, * | 80/TCP | *, * | Allow all communication on the specified port |
// | 1B | -, - | 80/TCP | *, * | Deny all with an empty FromEndpoints slice |
// | 2A | *, * | 80/TCP | *, "GET /" | Rule 1 shadows rule 2 |
// | 2B | *, * | 80/TCP | "GET /", * | Same as 2A, but import in reverse order |
// | 3 | *, * | 80/TCP | "GET /","GET /" | Exactly duplicate rules (HTTP) |
// | 4 | *, * | 9092/TCP | "foo","foo" | Exactly duplicate rules (Kafka) |
// | 5A | *, * | 80/TCP | "foo","GET /" | Rules with conflicting L7 parser |
// | 5B | *, * | 80/TCP | "GET /","foo" | Same as 5A, but import in reverse order |
// | 6A | "id=a", * | 80/TCP | *, * | Rule 2 is a superset of rule 1 |
// | 6B | *, "id=a" | 80/TCP | *, * | Same as 6A, but import in reverse order |
// | 7A | "id=a", * | 80/TCP | "GET /", * | All traffic is allowed; traffic to A goes via proxy |
// | 7B | *, "id=a" | 80/TCP | *, "GET /" | Same as 7A, but import in reverse order |
// | 8A | "id=a", * | 80/TCP | "GET /","GET /" | Rule 2 is the same as rule 1, except matching all L3 |
// | 8B | *, "id=a" | 80/TCP | "GET /","GET /" | Same as 8A, but import in reverse order |
// | 9A | "id=a", * | 80/TCP | "foo","GET /" | Rules with conflicting L7 parser (+L3 match) |
// | 9B | *, "id=a" | 80/TCP | "GET /","foo" | Same as 9A, but import in reverse order |
// | 10 | "id=a", "id=c" | 80/TCP | "GET /","GET /" | Allow at L7 for two distinct labels (disjoint set) |
// | 11 | "id=a", "id=c" | 80/TCP | *, * | Allow at L4 for two distinct labels (disjoint set) |
// | 12 | "id=a", | 80/TCP | "GET /" | Configure to allow localhost traffic always |
// | 13 | -, - | 80/TCP | *, * | Deny all with an empty ToEndpoints slice |
// +-----+-----------------+----------+-----------------+------------------------------------------------------+
func TestMergeAllowAllL3AndAllowAllL7(t *testing.T) {
td := newTestData()
// Case 1A: Specify WildcardEndpointSelector explicitly.
td.repo.MustAddList(api.Rules{&api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}})
buffer := new(bytes.Buffer)
ctx := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctx.Logging = stdlog.New(buffer, "", 0)
l4IngressPolicy, err := td.repo.ResolveL4IngressPolicy(&ctx)
require.NoError(t, err)
t.Log(buffer)
filter := l4IngressPolicy.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.True(t, filter.Ingress)
require.True(t, filter.SelectsAllEndpoints())
require.Equal(t, ParserTypeNone, filter.L7Parser)
require.Len(t, filter.PerSelectorPolicies, 1)
l4IngressPolicy.Detach(td.repo.GetSelectorCache())
// Case 1B: an empty non-nil FromEndpoints does not select any identity.
td = newTestData()
td.repo.MustAddList(api.Rules{&api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}})
buffer = new(bytes.Buffer)
ctx = SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctx.Logging = stdlog.New(buffer, "", 0)
l4IngressPolicy, err = td.repo.ResolveL4IngressPolicy(&ctx)
require.NoError(t, err)
t.Log(buffer)
filter = l4IngressPolicy.ExactLookup("80", 0, "TCP")
require.Nil(t, filter)
l4IngressPolicy.Detach(td.repo.GetSelectorCache())
}
// Case 2: allow all at L3 in both rules. Allow all in one L7 rule, but second
// rule restricts at L7. Because one L7 rule allows at L7, all traffic is allowed
// at L7, but still redirected at the proxy.
// Should resolve to one rule.
func TestMergeAllowAllL3AndShadowedL7(t *testing.T) {
td := newTestData()
rule1 := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
},
}
buffer := new(bytes.Buffer)
ctx := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctx.Logging = stdlog.New(buffer, "", 0)
ingressState := traceState{}
res, err := rule1.resolveIngressPolicy(td.testPolicyContext, &ctx, &ingressState, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
t.Log(buffer)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: "http",
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}, {}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
}})
require.EqualValues(t, expected, res)
require.Equal(t, 1, ingressState.selectedRules)
require.Equal(t, 1, ingressState.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
// Case 2B: Flip the order of case 2A so that the rule being merged with is
// different from the rule being consumed.
td = newTestData()
td.repo.MustAddList(api.Rules{&api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}})
buffer = new(bytes.Buffer)
ctx = SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctx.Logging = stdlog.New(buffer, "", 0)
l4IngressPolicy, err := td.repo.ResolveL4IngressPolicy(&ctx)
require.NoError(t, err)
t.Log(buffer)
filter := l4IngressPolicy.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.True(t, filter.Ingress)
require.True(t, filter.SelectsAllEndpoints())
require.Equal(t, ParserTypeHTTP, filter.L7Parser)
require.Len(t, filter.PerSelectorPolicies, 1)
l4IngressPolicy.Detach(td.repo.GetSelectorCache())
}
// Case 3: allow all at L3 in both rules. Both rules have the same parser type and
// the same API resource specified at L7 for HTTP.
func TestMergeIdenticalAllowAllL3AndRestrictedL7HTTP(t *testing.T) {
td := newTestData()
identicalHTTPRule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
},
}
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
}})
buffer := new(bytes.Buffer)
ctxToA := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
state := traceState{}
res, err := identicalHTTPRule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = identicalHTTPRule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
// Case 4: identical allow all at L3 with identical restrictions on Kafka.
func TestMergeIdenticalAllowAllL3AndRestrictedL7Kafka(t *testing.T) {
td := newTestData()
identicalKafkaRule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: api.EndpointSelectorSlice{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "9092", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: api.EndpointSelectorSlice{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "9092", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
},
},
}
buffer := new(bytes.Buffer)
ctxToA := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"9092/TCP": {
Port: 9092,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeKafka,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
Kafka: []kafka.PortRule{{Topic: "foo"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
}})
state := traceState{}
res, err := identicalKafkaRule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = identicalKafkaRule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
// Case 5: use conflicting L7 parsers on the same port in different rules. This
// is not supported, so an error is returned.
func TestMergeIdenticalAllowAllL3AndMismatchingParsers(t *testing.T) {
td := newTestData()
// Case 5A: Kafka first, HTTP second.
conflictingParsersRule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: api.EndpointSelectorSlice{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
},
}
buffer := new(bytes.Buffer)
ctxToA := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
state := traceState{}
res, err := conflictingParsersRule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.Error(t, err)
require.Nil(t, res)
// Case 5B: HTTP first, Kafka second.
conflictingParsersRule = &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: api.EndpointSelectorSlice{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
},
},
}
buffer = new(bytes.Buffer)
ctxToA = SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
state = traceState{}
res, err = conflictingParsersRule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.Error(t, err)
require.Nil(t, res)
// Case 5B+: HTTP first, generic L7 second.
conflictingParsersIngressRule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: api.EndpointSelectorSlice{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
L7Proto: "testing",
L7: []api.PortRuleL7{
{"method": "PUT", "path": "/Foo"},
},
},
}},
},
},
},
}
buffer = new(bytes.Buffer)
ctxToA = SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
err = conflictingParsersIngressRule.Sanitize()
require.NoError(t, err)
state = traceState{}
res, err = conflictingParsersIngressRule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.Error(t, err)
require.Nil(t, res)
// Case 5B++: generic L7 without rules first, HTTP second.
conflictingParsersEgressRule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
L7Proto: "testing",
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
},
}
buffer = new(bytes.Buffer)
ctxAToC := SearchContext{From: labelsA, To: labelsC, Trace: TRACE_VERBOSE}
ctxAToC.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
err = conflictingParsersEgressRule.Sanitize()
require.NoError(t, err)
state = traceState{}
res, err = conflictingParsersEgressRule.resolveEgressPolicy(td.testPolicyContext, &ctxAToC, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.Error(t, err)
require.Nil(t, res)
}
// TLS policies with and without interception
// TLS policy without L7 rules does not inspect L7, uses L7ParserType "tls"
func TestMergeTLSTCPPolicy(t *testing.T) {
td := newTestData()
egressRule := &rule{
Rule: api.Rule{
EndpointSelector: fooSelector,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
TerminatingTLS: &api.TLSContext{
Secret: &api.Secret{
Name: "tls-cert",
},
},
OriginatingTLS: &api.TLSContext{
Secret: &api.Secret{
Name: "tls-ca-certs",
},
},
}},
},
},
},
}
buffer := new(bytes.Buffer)
ctxFromFoo := SearchContext{From: labels.ParseSelectLabelArray("foo"), Trace: TRACE_VERBOSE}
ctxFromFoo.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
err := egressRule.Sanitize()
require.NoError(t, err)
state := traceState{}
res, err := egressRule.resolveEgressPolicy(td.testPolicyContext, &ctxFromFoo, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.NoError(t, err)
require.NotNil(t, res)
// Since cachedSelectorA's map entry is 'nil', it will not be redirected to the proxy.
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"443/TCP": {
Port: 443,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: nil,
L7Parser: ParserTypeTLS,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorA: nil, // no proxy redirect
td.cachedSelectorC: &PerSelectorPolicy{
TerminatingTLS: &TLSContext{
CertificateChain: "fake public cert",
PrivateKey: "fake private key",
Secret: types.NamespacedName{
Name: "tls-cert",
},
},
OriginatingTLS: &TLSContext{
Secret: types.NamespacedName{
Name: "tls-ca-certs",
},
TrustedCA: "fake CA certs",
},
EnvoyHTTPRules: nil,
CanShortCircuit: false,
L7Rules: api.L7Rules{},
isRedirect: true,
},
},
Ingress: false,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.cachedSelectorC: {nil},
}),
}})
require.EqualValues(t, expected, res)
l4Filter := res.ExactLookup("443", 0, "TCP")
require.NotNil(t, l4Filter)
require.Equal(t, ParserTypeTLS, l4Filter.L7Parser)
}
func TestMergeTLSHTTPPolicy(t *testing.T) {
td := newTestData()
egressRule := &rule{
Rule: api.Rule{
EndpointSelector: fooSelector,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
TerminatingTLS: &api.TLSContext{
Secret: &api.Secret{
Name: "tls-cert",
},
},
OriginatingTLS: &api.TLSContext{
Secret: &api.Secret{
Name: "tls-ca-certs",
},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{{}},
},
}},
},
},
},
}
buffer := new(bytes.Buffer)
ctxFromFoo := SearchContext{From: labels.ParseSelectLabelArray("foo"), Trace: TRACE_VERBOSE}
ctxFromFoo.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
err := egressRule.Sanitize()
require.NoError(t, err)
state := traceState{}
res, err := egressRule.resolveEgressPolicy(td.testPolicyContext, &ctxFromFoo, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.NoError(t, err)
require.NotNil(t, res)
// Since cachedSelectorA's map entry is 'nil', it will not be redirected to the proxy.
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"443/TCP": {
Port: 443,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: nil,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorA: nil, // no proxy redirect
td.cachedSelectorC: &PerSelectorPolicy{
TerminatingTLS: &TLSContext{
CertificateChain: "fake public cert",
PrivateKey: "fake private key",
Secret: types.NamespacedName{
Name: "tls-cert",
},
},
OriginatingTLS: &TLSContext{
Secret: types.NamespacedName{
Name: "tls-ca-certs",
},
TrustedCA: "fake CA certs",
},
EnvoyHTTPRules: nil,
CanShortCircuit: false,
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{}},
},
isRedirect: true,
},
},
Ingress: false,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.cachedSelectorC: {nil},
}),
}})
require.EqualValues(t, expected, res)
l4Filter := res.ExactLookup("443", 0, "TCP")
require.NotNil(t, l4Filter)
require.Equal(t, ParserTypeHTTP, l4Filter.L7Parser)
}
func TestMergeTLSSNIPolicy(t *testing.T) {
td := newTestData()
egressRule := &rule{
Rule: api.Rule{
EndpointSelector: fooSelector,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
TerminatingTLS: &api.TLSContext{
Secret: &api.Secret{
Name: "tls-cert",
},
},
OriginatingTLS: &api.TLSContext{
Secret: &api.Secret{
Name: "tls-ca-certs",
},
},
ServerNames: []string{"www.foo.com"},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
ServerNames: []string{"www.bar.com"},
}, {
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{{}},
},
}},
},
},
},
}
buffer := new(bytes.Buffer)
ctxFromFoo := SearchContext{From: labels.ParseSelectLabelArray("foo"), Trace: TRACE_VERBOSE}
ctxFromFoo.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
err := egressRule.Sanitize()
require.NoError(t, err)
state := traceState{}
res, err := egressRule.resolveEgressPolicy(td.testPolicyContext, &ctxFromFoo, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.NoError(t, err)
require.NotNil(t, res)
// Since cachedSelectorA's map entry is 'nil', it will not be redirected to the proxy.
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"443/TCP": {
Port: 443,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: nil,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorA: nil, // no proxy redirect
td.cachedSelectorC: &PerSelectorPolicy{
TerminatingTLS: &TLSContext{
CertificateChain: "fake public cert",
PrivateKey: "fake private key",
Secret: types.NamespacedName{
Name: "tls-cert",
},
},
OriginatingTLS: &TLSContext{
Secret: types.NamespacedName{
Name: "tls-ca-certs",
},
TrustedCA: "fake CA certs",
},
ServerNames: StringSet{"www.foo.com": {}, "www.bar.com": {}},
EnvoyHTTPRules: nil,
CanShortCircuit: false,
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{}},
},
isRedirect: true,
},
},
Ingress: false,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.cachedSelectorC: {nil},
}),
}})
require.EqualValues(t, expected, res)
require.True(t, res.TestingOnlyEquals(expected), res.TestingOnlyDiff(expected))
l4Filter := res.ExactLookup("443", 0, "TCP")
require.NotNil(t, l4Filter)
require.Equal(t, ParserTypeHTTP, l4Filter.L7Parser)
}
func TestMergeListenerPolicy(t *testing.T) {
td := newTestData()
//
// no namespace in policyContext (Clusterwide policy): Can not refer to EnvoyConfig
//
egressRule := &rule{
Rule: api.Rule{
EndpointSelector: fooSelector,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
Listener: &api.Listener{
EnvoyConfig: &api.EnvoyConfig{
Kind: "CiliumEnvoyConfig",
Name: "test-cec",
},
Name: "test",
},
}},
},
},
},
}
buffer := new(bytes.Buffer)
ctxFromFoo := SearchContext{From: labels.ParseSelectLabelArray("foo"), Trace: TRACE_VERBOSE}
ctxFromFoo.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
err := egressRule.Sanitize()
require.NoError(t, err)
state := traceState{}
res, err := egressRule.resolveEgressPolicy(td.testPolicyContext, &ctxFromFoo, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.ErrorContains(t, err, "Listener \"test\" in CCNP can not use Kind CiliumEnvoyConfig")
require.Nil(t, res)
//
// no namespace in policyContext (Clusterwide policy): Must refer to a CiliumClusterwideEnvoyConfig
//
egressRule = &rule{
Rule: api.Rule{
EndpointSelector: fooSelector,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
Listener: &api.Listener{
EnvoyConfig: &api.EnvoyConfig{
Kind: "CiliumClusterwideEnvoyConfig",
Name: "shared-cec",
},
Name: "test",
},
}},
},
},
},
}
buffer = new(bytes.Buffer)
ctxFromFoo = SearchContext{From: labels.ParseSelectLabelArray("foo"), Trace: TRACE_VERBOSE}
ctxFromFoo.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
err = egressRule.Sanitize()
require.NoError(t, err)
state = traceState{}
res, err = egressRule.resolveEgressPolicy(td.testPolicyContext, &ctxFromFoo, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.NoError(t, err)
require.NotNil(t, res)
// Since cachedSelectorA's map entry is 'nil', it will not be redirected to the proxy.
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"443/TCP": {
Port: 443,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: nil,
L7Parser: ParserTypeCRD,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorA: nil, // no proxy redirect
td.cachedSelectorC: &PerSelectorPolicy{
EnvoyHTTPRules: nil,
CanShortCircuit: false,
isRedirect: true,
Listener: "/shared-cec/test",
},
},
Ingress: false,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.cachedSelectorC: {nil},
}),
}})
require.EqualValues(t, expected, res)
l4Filter := res.ExactLookup("443", 0, "TCP")
require.NotNil(t, l4Filter)
require.Equal(t, ParserTypeCRD, l4Filter.L7Parser)
//
// namespace in policyContext (Namespaced policy): Can refer to EnvoyConfig
//
egressRule = &rule{
Rule: api.Rule{
EndpointSelector: fooSelector,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
Listener: &api.Listener{
EnvoyConfig: &api.EnvoyConfig{
Kind: "CiliumEnvoyConfig",
Name: "test-cec",
},
Name: "test",
},
}},
},
},
},
}
buffer = new(bytes.Buffer)
ctxFromFoo = SearchContext{From: labels.ParseSelectLabelArray("foo"), Trace: TRACE_VERBOSE}
ctxFromFoo.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
err = egressRule.Sanitize()
require.NoError(t, err)
state = traceState{}
td.testPolicyContext.ns = "default"
res, err = egressRule.resolveEgressPolicy(td.testPolicyContext, &ctxFromFoo, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.NoError(t, err)
require.NotNil(t, res)
// Since cachedSelectorA's map entry is 'nil', it will not be redirected to the proxy.
expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"443/TCP": {
Port: 443,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: nil,
L7Parser: ParserTypeCRD,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorA: nil, // no proxy redirect
td.cachedSelectorC: &PerSelectorPolicy{
EnvoyHTTPRules: nil,
CanShortCircuit: false,
isRedirect: true,
Listener: "default/test-cec/test",
},
},
Ingress: false,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.cachedSelectorC: {nil},
}),
}})
require.EqualValues(t, expected, res)
l4Filter = res.ExactLookup("443", 0, "TCP")
require.NotNil(t, l4Filter)
require.Equal(t, ParserTypeCRD, l4Filter.L7Parser)
//
// namespace in policyContext (Namespaced policy): Can refer to Cluster-scoped
// CiliumClusterwideEnvoyConfig
//
egressRule = &rule{
Rule: api.Rule{
EndpointSelector: fooSelector,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "443", Protocol: api.ProtoTCP},
},
Listener: &api.Listener{
EnvoyConfig: &api.EnvoyConfig{
Kind: "CiliumClusterwideEnvoyConfig",
Name: "shared-cec",
},
Name: "test",
},
}},
},
},
},
}
buffer = new(bytes.Buffer)
ctxFromFoo = SearchContext{From: labels.ParseSelectLabelArray("foo"), Trace: TRACE_VERBOSE}
ctxFromFoo.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
err = egressRule.Sanitize()
require.NoError(t, err)
state = traceState{}
td.testPolicyContext.ns = "default"
res, err = egressRule.resolveEgressPolicy(td.testPolicyContext, &ctxFromFoo, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.NoError(t, err)
require.NotNil(t, res)
// Since cachedSelectorA's map entry is 'nil', it will not be redirected to the proxy.
expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"443/TCP": {
Port: 443,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: nil,
L7Parser: ParserTypeCRD,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorA: nil, // no proxy redirect
td.cachedSelectorC: &PerSelectorPolicy{
EnvoyHTTPRules: nil,
CanShortCircuit: false,
isRedirect: true,
Listener: "/shared-cec/test",
},
},
Ingress: false,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.cachedSelectorC: {nil},
}),
}})
require.EqualValues(t, expected, res)
l4Filter = res.ExactLookup("443", 0, "TCP")
require.NotNil(t, l4Filter)
require.Equal(t, ParserTypeCRD, l4Filter.L7Parser)
}
// Case 6: allow all at L3/L7 in one rule, and select an endpoint and allow all on L7
// in another rule. Should resolve to just allowing all on L3/L7 (first rule
// shadows the second).
func TestL3RuleShadowedByL3AllowAll(t *testing.T) {
td := newTestData()
// Case 6A: Specify WildcardEndpointSelector explicitly.
shadowRule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
},
}
buffer := new(bytes.Buffer)
ctxToA := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeNone,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorA: nil,
td.wildcardCachedSelector: nil,
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.wildcardCachedSelector: {nil},
}),
}})
state := traceState{}
res, err := shadowRule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = shadowRule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
// Case 6B: Reverse the ordering of the rules. Result should be the same.
shadowRule = &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
},
}
buffer = new(bytes.Buffer)
ctxToA = SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeNone,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
td.cachedSelectorA: nil,
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.wildcardCachedSelector: {nil},
}),
}})
state = traceState{}
res, err = shadowRule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = shadowRule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
// Case 7: allow all at L3/L7 in one rule, and in another rule, select an endpoint
// which restricts on L7. Should resolve to allowing all on L3/L7 (the allow-all rule
// shadows the L7 restriction for the wildcard peer), while still redirecting traffic
// to the HTTP proxy.
func TestL3RuleWithL7RulePartiallyShadowedByL3AllowAll(t *testing.T) {
td := newTestData()
// Case 7A: rule selecting a specific endpoint with L7 restrictions first, then a
// rule which selects all endpoints and allows all on L7. Net result sets the
// parser type to HTTP; the wildcard peer is unrestricted on L7 while the specific
// endpoint keeps its L7 rules.
shadowRule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
},
}
buffer := new(bytes.Buffer)
ctxToA := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
td.cachedSelectorA: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.wildcardCachedSelector: {nil},
}),
}})
state := traceState{}
res, err := shadowRule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = shadowRule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
// Case 7B: rule which selects all endpoints and allows all on L7 first, then a
// rule selecting a specific endpoint with L7 restrictions. Net result is the same:
// parser type HTTP, with the wildcard peer unrestricted on L7.
shadowRule = &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
},
}
buffer = new(bytes.Buffer)
ctxToA = SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
td.cachedSelectorA: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.wildcardCachedSelector: {nil},
td.cachedSelectorA: {nil},
}),
}})
state = traceState{}
res, err = shadowRule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = shadowRule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
// Case 8: allow all at L3 and restrict on L7 in one rule, and in another rule,
// select an endpoint which applies the same L7 restriction as the first rule.
// Should resolve to allowing all on L3, but restricting on L7 for both the
// wildcard and the specified endpoint.
func TestL3RuleWithL7RuleShadowedByL3AllowAll(t *testing.T) {
td := newTestData()
// Case 8A: rule selecting a specific endpoint with L7 restrictions first, then a
// rule which selects all endpoints and restricts on the same resource on L7.
// PerSelectorPolicies contains an entry, carrying the L7 restriction, for the
// peer selected by each rule.
case8Rule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
},
}
buffer := new(bytes.Buffer)
ctxToA := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
td.cachedSelectorA: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.wildcardCachedSelector: {nil},
}),
}})
state := traceState{}
res, err := case8Rule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = case8Rule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
// Case 8B: first insert the rule which selects all endpoints and restricts on the
// same resource on L7. Then insert the rule which selects a specific endpoint
// with L7 restrictions. PerSelectorPolicies contains an entry, carrying the L7
// restriction, for the peer selected by each rule.
case8Rule = &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
},
}
buffer = new(bytes.Buffer)
ctxToA = SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
td.cachedSelectorA: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.wildcardCachedSelector: {nil},
}),
}})
state = traceState{}
res, err = case8Rule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
state = traceState{}
res, err = case8Rule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
// Case 9: allow all at L3 and restrict on L7 in one rule, and in another rule,
// select an endpoint which restricts on a different L7 protocol.
// Should fail, as conflicting L7 parsers cannot be merged on the same port.
func TestL3SelectingEndpointAndL3AllowAllMergeConflictingL7(t *testing.T) {
td := newTestData()
// Case 9A: Kafka first, then HTTP.
conflictingL7Rule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
},
}
buffer := new(bytes.Buffer)
ctxToA := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
state := traceState{}
res, err := conflictingL7Rule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.Error(t, err)
require.Nil(t, res)
state = traceState{}
res, err = conflictingL7Rule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
// Case 9B: HTTP first, then Kafka.
conflictingL7Rule = &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
},
},
}
buffer = new(bytes.Buffer)
ctxToA = SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
state = traceState{}
res, err = conflictingL7Rule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.Error(t, err)
require.Nil(t, res)
state = traceState{}
res, err = conflictingL7Rule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
// Case 10: restrict the same path/method on L7 in both rules,
// but select different endpoints in each rule.
func TestMergingWithDifferentEndpointsSelectedAllowSameL7(t *testing.T) {
td := newTestData()
selectDifferentEndpointsRestrictL7 := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
},
}
buffer := new(bytes.Buffer)
ctxToA := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: nil,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorC: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
td.cachedSelectorA: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.cachedSelectorC: {nil},
}),
}})
state := traceState{}
res, err := selectDifferentEndpointsRestrictL7.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
buffer = new(bytes.Buffer)
ctxToC := SearchContext{To: labelsC, Trace: TRACE_VERBOSE}
ctxToC.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
state = traceState{}
res, err = selectDifferentEndpointsRestrictL7.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
// Case 11: allow all on L7 in both rules, but select different endpoints in each rule.
func TestMergingWithDifferentEndpointSelectedAllowAllL7(t *testing.T) {
td := newTestData()
selectDifferentEndpointsAllowAllL7 := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorA},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
},
}
buffer := new(bytes.Buffer)
ctxToA := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: nil,
L7Parser: ParserTypeNone,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorA: nil,
td.cachedSelectorC: nil,
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorA: {nil},
td.cachedSelectorC: {nil},
}),
}})
state := traceState{}
res, err := selectDifferentEndpointsAllowAllL7.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
buffer = new(bytes.Buffer)
ctxToC := SearchContext{To: labelsC, Trace: TRACE_VERBOSE}
ctxToC.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
state = traceState{}
res, err = selectDifferentEndpointsAllowAllL7.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
// Case 12: allow all at L3 in one rule with restrictions at L7. Determine that
// the host should always be allowed: traffic from the host is allowed without a
// proxy redirect, while other L3 peers remain restricted at L7 in the same filter.
func TestAllowingLocalhostShadowsL7(t *testing.T) {
td := newTestData()
// This test checks that when the AllowLocalhost=always option is
// enabled, we always wildcard the host at L7. That means we need to
// set the option in the config, and of course clean up afterwards so
// that this test doesn't affect subsequent tests.
// XXX: Does this affect other tests being run concurrently?
oldLocalhostOpt := option.Config.AllowLocalhost
option.Config.AllowLocalhost = option.AllowLocalhostAlways
defer func() { option.Config.AllowLocalhost = oldLocalhostOpt }()
rule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
},
}
buffer := new(bytes.Buffer)
ctxToA := SearchContext{To: labelsA, Trace: TRACE_VERBOSE}
ctxToA.Logging = stdlog.New(buffer, "", 0)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
td.cachedSelectorHost: nil, // no proxy redirect
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
}})
state := traceState{}
res, err := rule.resolveIngressPolicy(td.testPolicyContext, &ctxToA, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.NoError(t, err)
require.NotNil(t, res)
require.EqualValues(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
// Endpoints not selected by the rule should not match the rule.
buffer = new(bytes.Buffer)
ctxToC := SearchContext{To: labelsC, Trace: TRACE_VERBOSE}
ctxToC.Logging = stdlog.New(buffer, "", 0)
state = traceState{}
res, err = rule.resolveIngressPolicy(td.testPolicyContext, toFoo, &state, NewL4PolicyMap(), nil, nil)
t.Log(buffer)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 0, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
func TestEntitiesL3(t *testing.T) {
td := newTestData()
allowWorldRule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEntities: api.EntitySlice{api.EntityAll},
},
},
},
},
}
buffer := new(bytes.Buffer)
ctxFromA := SearchContext{From: labelsA, Trace: TRACE_VERBOSE}
ctxFromA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"0/ANY": {
Port: 0,
Protocol: api.ProtoAny,
U8Proto: 0,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeNone,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
Ingress: false,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
}})
state := traceState{}
res, err := allowWorldRule.resolveEgressPolicy(td.testPolicyContext, &ctxFromA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, expected, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 1, state.matchedRules)
res.Detach(td.sc)
expected.Detach(td.sc)
}
// Case 13: deny all at L3 in case of an empty non-nil toEndpoints slice.
func TestEgressEmptyToEndpoints(t *testing.T) {
td := newTestData()
rule := &rule{
Rule: api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
},
}
buffer := new(bytes.Buffer)
ctxFromA := SearchContext{From: labelsA, Trace: TRACE_VERBOSE}
ctxFromA.Logging = stdlog.New(buffer, "", 0)
t.Log(buffer)
state := traceState{}
res, err := rule.resolveEgressPolicy(td.testPolicyContext, &ctxFromA, &state, NewL4PolicyMap(), nil, nil)
require.NoError(t, err)
require.Nil(t, res)
require.Equal(t, 1, state.selectedRules)
require.Equal(t, 0, state.matchedRules)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
// This file includes utilities for simulating policy verdicts.
//
// They are only used for tests, but by multiple packages.
import (
"fmt"
"log/slog"
"github.com/sirupsen/logrus"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/spanstat"
"github.com/cilium/cilium/pkg/u8proto"
)
type Flow struct {
From, To *identity.Identity
Proto u8proto.U8proto
Dport uint16
}
type EndpointInfo struct {
ID uint64
TCPNamedPorts map[string]uint16
UDPNamedPorts map[string]uint16
Logger *slog.Logger
// Used when determining peer named ports
remoteEndpoint *EndpointInfo
}
// LookupFlow determines the policy verdict for a given flow.
//
// The flow's identities must have been loaded into the repository's SelectorCache,
// or policy will not be correctly computed.
//
// This function is only used for testing, but by multiple packages.
//
// TODO: add support for redirects
func LookupFlow(repo PolicyRepository, flow Flow, srcEP, dstEP *EndpointInfo) (api.Decision, error) {
if flow.From.ID == 0 || flow.To.ID == 0 {
return api.Undecided, fmt.Errorf("cannot lookup flow: numeric IDs missing")
}
if _, exists := repo.GetSelectorCache().idCache[flow.From.ID]; !exists {
return api.Undecided, fmt.Errorf("From.ID not in SelectorCache!")
}
if _, exists := repo.GetSelectorCache().idCache[flow.To.ID]; !exists {
return api.Undecided, fmt.Errorf("To.ID not in SelectorCache!")
}
if flow.Dport == 0 {
return api.Undecided, fmt.Errorf("cannot lookup flow: port number missing")
}
if flow.Proto == 0 {
return api.Undecided, fmt.Errorf("cannot lookup flow: protocol missing")
}
if srcEP == nil {
srcEP = &EndpointInfo{
ID: uint64(flow.From.ID),
}
}
if dstEP == nil {
dstEP = &EndpointInfo{
ID: uint64(flow.To.ID),
}
}
srcEP.remoteEndpoint = dstEP
dstEP.remoteEndpoint = srcEP
// Resolve and look up the flow as egress from the source
selPolSrc, _, err := repo.GetSelectorPolicy(flow.From, 0, &dummyPolicyStats{})
if err != nil {
return api.Undecided, fmt.Errorf("GetSelectorPolicy(from) failed: %w", err)
}
epp := selPolSrc.DistillPolicy(srcEP, nil)
epp.Ready()
epp.Detach()
key := EgressKey().WithIdentity(flow.To.ID).WithPortProto(flow.Proto, flow.Dport)
entry, _, _ := epp.Lookup(key)
if entry.IsDeny() {
return api.Denied, nil
}
// Resolve ingress policy for destination
selPolDst, _, err := repo.GetSelectorPolicy(flow.To, 0, &dummyPolicyStats{})
if err != nil {
return api.Undecided, fmt.Errorf("GetSelectorPolicy(to) failed: %w", err)
}
epp = selPolDst.DistillPolicy(dstEP, nil)
epp.Ready()
epp.Detach()
key = IngressKey().WithIdentity(flow.From.ID).WithPortProto(flow.Proto, flow.Dport)
entry, _, _ = epp.Lookup(key)
if entry.IsDeny() {
return api.Denied, nil
}
return api.Allowed, nil
}
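// exampleLookupFlow is an illustrative sketch (not part of the original code) of
// how LookupFlow is meant to be driven from a test: build a Flow between two
// identities that are already present in the repository's SelectorCache and ask
// for the verdict. 'repo', 'idA' and 'idB' are assumed to be prepared by the
// caller; the port and protocol below are arbitrary.
func exampleLookupFlow(repo PolicyRepository, idA, idB *identity.Identity) (api.Decision, error) {
	flow := Flow{
		From:  idA,
		To:    idB,
		Proto: u8proto.TCP,
		Dport: 80,
	}
	// nil EndpointInfos are filled in with defaults derived from the identities.
	return LookupFlow(repo, flow, nil, nil)
}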
var _ PolicyOwner = &EndpointInfo{}
func (ei *EndpointInfo) GetID() uint64 {
return ei.ID
}
// GetNamedPort determines the named port of the *destination*. So, if ingress
// is false, then this looks up the peer.
func (ei *EndpointInfo) GetNamedPort(ingress bool, name string, proto u8proto.U8proto) uint16 {
if !ingress && ei.remoteEndpoint != nil {
return ei.remoteEndpoint.GetNamedPort(true, name, proto)
}
switch {
case proto == u8proto.TCP && ei.TCPNamedPorts != nil:
return ei.TCPNamedPorts[name]
case proto == u8proto.UDP && ei.UDPNamedPorts != nil:
return ei.UDPNamedPorts[name]
}
return 0
}
func (ei *EndpointInfo) PolicyDebug(fields logrus.Fields, msg string) {
if ei.Logger != nil {
args := make([]any, 0, len(fields)*2)
for k, v := range fields {
args = append(args, k, v)
}
ei.Logger.Debug(msg, args...)
}
}
func (ei *EndpointInfo) IsHost() bool {
return false
}
// MapStateSize returns the size of the current desired policy map, used for preallocation of the
// new map. Returns 0 here, as this is only used for testing.
func (ei *EndpointInfo) MapStateSize() int {
return 0
}
type dummyPolicyStats struct {
waitingForPolicyRepository spanstat.SpanStat
policyCalculation spanstat.SpanStat
}
func (s *dummyPolicyStats) WaitingForPolicyRepository() *spanstat.SpanStat {
return &s.waitingForPolicyRepository
}
func (s *dummyPolicyStats) SelectorPolicyCalculation() *spanstat.SpanStat {
return &s.policyCalculation
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"iter"
"github.com/hashicorp/go-hclog"
"github.com/sirupsen/logrus"
"github.com/cilium/cilium/pkg/container/bitlpm"
"github.com/cilium/cilium/pkg/container/versioned"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/trafficdirection"
"github.com/cilium/cilium/pkg/policy/types"
)
// Key and Keys are types used both internally and externally.
// The types have been lifted out, but an alias is being used
// so we don't have to change all the code everywhere.
//
// Do not use these types outside of pkg/policy or pkg/endpoint,
// lest ye find yourself with hundreds of unnecessary imports.
type Key = types.Key
type Keys = types.Keys
type MapStateEntry = types.MapStateEntry
const NoAuthRequirement = types.NoAuthRequirement
// Map type for external use. Internally we have more detail in private 'mapStateEntry' type,
// as well as more extensive indexing via tries.
type MapStateMap map[Key]MapStateEntry
type mapStateMap map[Key]mapStateEntry
func EgressKey() types.Key {
return types.EgressKey()
}
func IngressKey() types.Key {
return types.IngressKey()
}
func KeyForDirection(direction trafficdirection.TrafficDirection) Key {
return types.KeyForDirection(direction)
}
var (
// localHostKey represents an ingress L3 allow from the local host.
localHostKey = IngressKey().WithIdentity(identity.ReservedIdentityHost)
// allKey represents a key for unknown traffic, i.e., all traffic.
// We have one for each traffic direction
allKey = [2]Key{
IngressKey(),
EgressKey(),
}
)
const (
LabelKeyPolicyDerivedFrom = "io.cilium.policy.derived-from"
LabelAllowLocalHostIngress = "allow-localhost-ingress"
LabelAllowAnyIngress = "allow-any-ingress"
LabelAllowAnyEgress = "allow-any-egress"
)
var (
LabelsAllowAnyIngress = labels.LabelArray{
labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyIngress, labels.LabelSourceReserved)}
LabelsAllowAnyEgress = labels.LabelArray{
labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyEgress, labels.LabelSourceReserved)}
LabelsLocalHostIngress = labels.LabelArray{
labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowLocalHostIngress, labels.LabelSourceReserved)}
)
// mapState is an indexed container for policymap keys and entries.
//
// The `bitlpm.Trie` indexes the TrafficDirection, Protocol, and Port of
// a policy Key but does **not** index the identity. Instead identities
// that share TrafficDirection, Protocol, and Port are indexed in a builtin
// map type that is the associated value of the key-prefix of TrafficDirection,
// Protocol, and Port. This is done so that Identity does not explode
// the size of the Trie. Consider the case of a policy that selects
// many identities. In this case, if Identity was indexed then every
// identity associated with the policy would create at least one
// intermediate node in the Trie with its own sub node associated with
// TrafficDirection, Protocol, and Port. When identity is not indexed
// then one policy will map to one key-prefix with a builtin map type
// that associates each identity with a mapStateEntry. This strategy
// greatly enhances the usefulness of the Trie and improves lookup,
// deletion, and insertion times.
type mapState struct {
// entries is the map containing the MapStateEntries
entries mapStateMap
// trie is a Trie that indexes policy Keys without their identity
// and stores the identities in an associated builtin map.
trie bitlpm.Trie[types.LPMKey, IDSet]
}
type IDSet map[identity.NumericIdentity]struct{}
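// exampleSharedIDSet is an illustrative sketch (not part of the original code)
// of the indexing strategy described above: two keys that differ only in their
// identity share a single trie node keyed by the direction/protocol/port prefix,
// and their identities are grouped into one IDSet, while the full entries stay
// in the entries map. The identities and port used here are arbitrary.
func exampleSharedIDSet() mapState {
	ms := newMapState(0)
	k1 := IngressKey().WithIdentity(100).WithPortProto(6, 80) // TCP/80, identity 100
	k2 := IngressKey().WithIdentity(200).WithPortProto(6, 80) // TCP/80, identity 200
	ms.upsert(k1, newMapStateEntry(NilRuleOrigin, 0, 0, false, NoAuthRequirement))
	ms.upsert(k2, newMapStateEntry(NilRuleOrigin, 0, 0, false, NoAuthRequirement))
	// The trie now holds one node for TCP/80 whose IDSet contains both 100 and 200.
	return ms
}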
// Valid returns true if the entries map has been initialized
func (ms *mapState) Valid() bool {
return ms.entries != nil
}
func (ms *mapState) Empty() bool {
return len(ms.entries) == 0
}
func (ms *mapState) upsert(k Key, e mapStateEntry) {
_, exists := ms.entries[k]
// upsert entry
ms.entries[k] = e
// Update indices if 'k' is a new key
if !exists {
// Update trie
idSet, ok := ms.trie.ExactLookup(k.PrefixLength(), k.LPMKey)
if !ok {
idSet = make(IDSet)
ms.trie.Upsert(k.PrefixLength(), k.LPMKey, idSet)
}
idSet[k.Identity] = struct{}{}
}
}
func (ms *mapState) delete(k Key) {
_, exists := ms.entries[k]
if exists {
delete(ms.entries, k)
idSet, ok := ms.trie.ExactLookup(k.PrefixLength(), k.LPMKey)
if ok {
delete(idSet, k.Identity)
if len(idSet) == 0 {
ms.trie.Delete(k.PrefixLength(), k.LPMKey)
}
}
}
}
func (ms *mapState) ForEach(f func(Key, MapStateEntry) bool) bool {
for k, e := range ms.entries {
if !f(k, e.MapStateEntry) {
return false
}
}
return true
}
func (ms *mapState) forEach(f func(Key, mapStateEntry) bool) bool {
for k, e := range ms.entries {
if !f(k, e) {
return false
}
}
return true
}
func (ms *mapState) forKey(k Key, f func(Key, mapStateEntry) bool) bool {
e, ok := ms.entries[k]
if ok {
return f(k, e)
}
log.WithFields(logrus.Fields{
logfields.Stacktrace: hclog.Stacktrace(),
logfields.PolicyKey: k,
}).Errorf("Missing MapStateEntry")
return true
}
// forIDs calls 'f' for each ID in 'idSet' with port/proto from 'k'.
func (ms *mapState) forIDs(k Key, idSet IDSet, f func(Key, mapStateEntry) bool) bool {
for id := range idSet {
k.Identity = id
if !ms.forKey(k, f) {
return false
}
}
return true
}
// forID calls 'f' for 'k' if 'k.Identity' exists in 'idSet'.
func (ms *mapState) forID(k Key, idSet IDSet, f func(Key, mapStateEntry) bool) bool {
if _, exists := idSet[k.Identity]; exists {
if !ms.forKey(k, f) {
return false
}
}
return true
}
// BroaderOrEqualKeys iterates over broader or equal keys (broader or equal port/proto and the
// same or wildcard ID) in the trie.
func (ms *mapState) BroaderOrEqualKeys(key Key) iter.Seq2[Key, mapStateEntry] {
return func(yield func(Key, mapStateEntry) bool) {
iter := ms.trie.AncestorIterator(key.PrefixLength(), key.LPMKey)
for ok, lpmKey, idSet := iter.Next(); ok; ok, lpmKey, idSet = iter.Next() {
k := Key{LPMKey: lpmKey}
// ANY identity is broader or equal to all identities, visit it first if it exists
if !ms.forID(k.WithIdentity(0), idSet, yield) {
return
}
// Visit key with the same identity, if it exists.
// ANY identity was already visited above.
if key.Identity != 0 && !ms.forID(k.WithIdentity(key.Identity), idSet, yield) {
return
}
}
}
}
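// exampleCoveringDeny is an illustrative sketch (not part of the original code)
// showing how BroaderOrEqualKeys is typically used: scan all entries covering
// 'key' (broader or equal port/proto with the same or wildcard identity) and
// report whether any of them is a deny, mirroring the bail-out check performed
// by insertWithChanges below.
func exampleCoveringDeny(ms *mapState, key Key) bool {
	for _, v := range ms.BroaderOrEqualKeys(key) {
		if v.IsDeny() {
			return true
		}
	}
	return false
}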
// NarrowerOrEqualKeys iterates over narrower or equal keys in the trie.
// Iterated keys can be safely deleted during iteration due to DescendantIterator holding enough
// state that allows iteration to be continued even if the current trie node is removed.
func (ms *mapState) NarrowerOrEqualKeys(key Key) iter.Seq2[Key, mapStateEntry] {
return func(yield func(Key, mapStateEntry) bool) {
iter := ms.trie.DescendantIterator(key.PrefixLength(), key.LPMKey)
for ok, lpmKey, idSet := iter.Next(); ok; ok, lpmKey, idSet = iter.Next() {
k := Key{LPMKey: lpmKey}
// All identities are narrower or equal to ANY identity.
if key.Identity == 0 {
if !ms.forIDs(k, idSet, yield) {
return
}
} else { // key has a specific identity
// Need to visit the key with the same identity, if it exists.
if !ms.forID(k.WithIdentity(key.Identity), idSet, yield) {
return
}
}
}
}
}
// CoveringKeysWithSameID iterates over broader port/proto entries in the trie in LPM order,
// with the most specific match with the same ID as in 'key' returned first.
func (ms *mapState) CoveringKeysWithSameID(key Key) iter.Seq2[Key, mapStateEntry] {
return func(yield func(Key, mapStateEntry) bool) {
iter := ms.trie.AncestorLongestPrefixFirstIterator(key.PrefixLength(), key.LPMKey)
for ok, lpmKey, idSet := iter.Next(); ok; ok, lpmKey, idSet = iter.Next() {
k := Key{LPMKey: lpmKey}
// Visit key with the same identity, if port/proto is different.
if !k.PortProtoIsEqual(key) && !ms.forID(k.WithIdentity(key.Identity), idSet, yield) {
return
}
}
}
}
// SubsetKeysWithSameID iterates over narrower or equal port/proto entries in the trie in an LPM
// order (least specific match first).
func (ms *mapState) SubsetKeysWithSameID(key Key) iter.Seq2[Key, mapStateEntry] {
return func(yield func(Key, mapStateEntry) bool) {
iter := ms.trie.DescendantShortestPrefixFirstIterator(key.PrefixLength(), key.LPMKey)
for ok, lpmKey, idSet := iter.Next(); ok; ok, lpmKey, idSet = iter.Next() {
k := Key{LPMKey: lpmKey}
// Visit key with the same identity, if port/proto is different.
if !k.PortProtoIsEqual(key) && !ms.forID(k.WithIdentity(key.Identity), idSet, yield) {
return
}
}
}
}
// LPMAncestors iterates over broader or equal port/proto entries in the trie in LPM order
// (most specific match first). For each port/proto ancestor, the key with the same ID as
// 'key' is visited first, followed by the key with the wildcard (zero) ID, if present.
func (ms *mapState) LPMAncestors(key Key) iter.Seq2[Key, mapStateEntry] {
return func(yield func(Key, mapStateEntry) bool) {
iter := ms.trie.AncestorLongestPrefixFirstIterator(key.PrefixLength(), key.LPMKey)
for ok, lpmKey, idSet := iter.Next(); ok; ok, lpmKey, idSet = iter.Next() {
k := Key{LPMKey: lpmKey}
// Visit key with the same identity, if one exists.
if !ms.forID(k.WithIdentity(key.Identity), idSet, yield) {
return
}
// Then visit key with zero identity if not already done above and one
// exists.
if key.Identity != 0 && !ms.forID(k.WithIdentity(0), idSet, yield) {
return
}
}
}
}
// lookup finds the policy verdict applicable to the given 'key' using the same precedence logic
// between L3 and L4-only policies as the bpf datapath when both match the given 'key'.
// To be used in testing in place of the bpf datapath when full integration testing is not desired.
// Returns the closest matching covering policy entry and 'true' if found.
// 'key' must not have a wildcard identity or port.
func (ms *mapState) lookup(key Key) (mapStateEntry, bool) {
// Validate that the search key has no wildcards
if key.Identity == 0 || key.Nexthdr == 0 || key.DestPort == 0 || key.EndPort() != key.DestPort {
panic("invalid key for Lookup")
}
var l3key, l4key Key
var l3entry, l4entry mapStateEntry
var haveL3, haveL4 bool
for k, v := range ms.LPMAncestors(key) {
if !haveL3 && k.Identity != 0 {
l3key, l3entry = k, v
haveL3 = true
}
if !haveL4 && k.Identity == 0 {
l4key, l4entry = k, v
haveL4 = true
}
if haveL3 && haveL4 {
break
}
}
authOverride := func(entry, other mapStateEntry) mapStateEntry {
// This logic needs to be the same as in authPreferredInsert() where the newEntry's
// auth type may be overridden by a covering key.
// This also needs to reflect the logic in bpf/lib/policy.h __account_and_check().
if !entry.AuthRequirement.IsExplicit() &&
other.AuthRequirement.AuthType() > entry.AuthRequirement.AuthType() {
entry.AuthRequirement = other.AuthRequirement.AsDerived()
}
return entry
}
// only one entry found
if haveL3 != haveL4 {
if haveL3 {
return l3entry, true
}
return l4entry, true
}
// both L3 and L4 matches found
if haveL3 && haveL4 {
// Precedence rules of the bpf datapath between two policy entries:
// 1. Deny is selected, if any
// 2. Higher proxy port priority wins
// 3. If both entries are allows at the same proxy port priority, the one with more
// specific L4 is selected
// 4. If the two allows on the same proxy port priority have equal port/proto, then
// the policy for a specific L3 is selected (rather than the L4-only entry)
//
// If the selected entry has non-explicit auth type, it gets the auth type from the
// other entry, if the other entry's auth type is numerically higher.
// 1. Deny wins
// Check for the L3 deny first to match the datapath behavior
if l3entry.IsDeny() {
return l3entry, true
}
if l4entry.IsDeny() {
return l4entry, true
}
// 2. Entry with higher proxy port priority is selected.
// Auth requirement does not propagate from a lower proxy port priority rule to a
// higher proxy port priority rule!
if l3entry.ProxyPortPriority > l4entry.ProxyPortPriority {
return l3entry, true
}
if l4entry.ProxyPortPriority > l3entry.ProxyPortPriority {
return l4entry, true
}
// 3. Two allow entries, select the one with more specific L4
// L3-entry must be selected if prefix lengths are the same!
if l4key.PrefixLength() > l3key.PrefixLength() {
return authOverride(l4entry, l3entry), true
}
// 4. The two allow entries have equally specific port/proto, or the L3-entry is more specific
return authOverride(l3entry, l4entry), true
}
// Deny by default if no matches are found
return mapStateEntry{MapStateEntry: types.DenyEntry(), derivedFromRules: NilRuleOrigin}, false
}
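// exampleLookup is an illustrative sketch (not part of the original code) of
// driving lookup from a test: the key must be fully specified (non-zero
// identity, protocol and port), and the returned entry reflects the datapath
// precedence rules documented above. Identity 100 and TCP/80 are arbitrary.
func exampleLookup(ms *mapState) bool {
	key := IngressKey().WithIdentity(100).WithPortProto(6, 80)
	entry, found := ms.lookup(key)
	// When no covering entry exists, a deny entry and 'false' are returned.
	return found && !entry.IsDeny()
}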
func (ms *mapState) Len() int {
return len(ms.entries)
}
// mapStateEntry is the entry type with additional internal bookkeeping of the relation between
// explicitly and implicitly added entries.
type mapStateEntry struct {
MapStateEntry
// derivedFromRules tracks the policy rules this entry derives from.
// Must be initialized explicitly; zero-initialization does not work with unique.Handle[].
derivedFromRules ruleOrigin
}
// newMapStateEntry creates a map state entry.
func newMapStateEntry(derivedFrom ruleOrigin, proxyPort uint16, priority uint8, deny bool, authReq AuthRequirement) mapStateEntry {
return mapStateEntry{
MapStateEntry: types.NewMapStateEntry(deny, proxyPort, priority, authReq),
derivedFromRules: derivedFrom,
}
}
// newAllowEntryWithLabels creates an allow entry with the specified labels.
// Used for adding allow-all entries when policy enforcement is not wanted.
func newAllowEntryWithLabels(lbls labels.LabelArray) mapStateEntry {
return newMapStateEntry(singleRuleOrigin(makeStringLabels(lbls)), 0, 0, false, NoAuthRequirement)
}
func NewMapStateEntry(e MapStateEntry) mapStateEntry {
return mapStateEntry{
MapStateEntry: e,
derivedFromRules: NilRuleOrigin,
}
}
func emptyMapState() mapState {
return newMapState(0)
}
func newMapState(size int) mapState {
return mapState{
entries: make(mapStateMap, size),
trie: bitlpm.NewTrie[types.LPMKey, IDSet](types.MapStatePrefixLen),
}
}
// Get the MapStateEntry that matches the Key.
func (ms *mapState) Get(k Key) (MapStateEntry, bool) {
v, ok := ms.get(k)
if ok {
return v.MapStateEntry, ok
}
return MapStateEntry{}, false
}
// Get the mapStateEntry that matches the Key.
func (ms *mapState) get(k Key) (mapStateEntry, bool) {
if k.DestPort == 0 && k.PortPrefixLen() > 0 {
log.WithFields(logrus.Fields{
logfields.Stacktrace: hclog.Stacktrace(),
logfields.PolicyKey: k,
}).Errorf("mapState.Get: invalid port prefix length for wildcard port")
}
v, ok := ms.entries[k]
return v, ok
}
// insert the Key and MapStateEntry into the MapState
func (ms *mapState) insert(k Key, v mapStateEntry) {
if k.DestPort == 0 && k.PortPrefixLen() > 0 {
log.WithFields(logrus.Fields{
logfields.Stacktrace: hclog.Stacktrace(),
logfields.PolicyKey: k,
}).Errorf("mapState.insert: invalid port prefix length for wildcard port")
}
ms.upsert(k, v)
}
// updateExisting re-inserts an existing entry to its map, to be used to persist changes in the
// entry. Indices are not updated.
func (ms *mapState) updateExisting(k Key, v mapStateEntry) {
ms.entries[k] = v
}
// Equals determines if this MapState is equal to the
// argument (exported) MapStateMap
// Only used for testing from other packages.
func (msA *mapState) Equals(msB MapStateMap) bool {
if msA.Len() != len(msB) {
return false
}
return msA.forEach(func(kA Key, vA mapStateEntry) bool {
vB, ok := msB[kA]
return ok && vB == vA.MapStateEntry
})
}
// Equal determines if this mapState is equal to the argument mapState.
// Only used for testing.
func (msA *mapState) Equal(msB *mapState) bool {
if msA.Len() != msB.Len() {
return false
}
return msA.forEach(func(kA Key, vA mapStateEntry) bool {
vB, ok := msB.get(kA)
return ok && (&vB).Equal(&vA)
})
}
// Diff returns the string of differences between 'obtained' and 'expected' prefixed with
// '+ ' or '- ' for obtaining something unexpected, or not obtaining the expected, respectively.
// For use in debugging from other packages.
func (obtained MapStateMap) Diff(expected MapStateMap) (res string) {
res += "Missing (-), Unexpected (+):\n"
for kE, vE := range expected {
if vO, ok := obtained[kE]; ok {
if vO != vE {
res += "- " + kE.String() + ": " + vE.String() + "\n"
res += "+ " + kE.String() + ": " + vO.String() + "\n"
}
} else {
res += "- " + kE.String() + ": " + vE.String() + "\n"
}
}
for kO, vO := range obtained {
if _, ok := expected[kO]; !ok {
res += "+ " + kO.String() + ": " + vO.String() + "\n"
}
}
return res
}
// Diff returns the string of differences between 'obtained' and 'expected' prefixed with
// '+ ' or '- ' for obtaining something unexpected, or not obtaining the expected, respectively.
// For use in debugging from other packages.
func (obtained *mapState) Diff(expected MapStateMap) (res string) {
res += "Missing (-), Unexpected (+):\n"
for kE, vE := range expected {
if vO, ok := obtained.get(kE); ok {
if vO.MapStateEntry != vE {
res += "- " + kE.String() + ": " + vE.String() + "\n"
res += "+ " + kE.String() + ": " + vO.MapStateEntry.String() + "\n"
}
} else {
res += "- " + kE.String() + ": " + vE.String() + "\n"
}
}
obtained.ForEach(func(kO Key, vO MapStateEntry) bool {
if _, ok := expected[kO]; !ok {
res += "+ " + kO.String() + ": " + vO.String() + "\n"
}
return true
})
return res
}
// diff returns the string of differences between 'obtained' and 'expected' prefixed with
// '+ ' or '- ' for obtaining something unexpected, or not obtaining the expected, respectively.
// For use in debugging.
func (obtained *mapState) diff(expected *mapState) (res string) {
res += "Missing (-), Unexpected (+):\n"
expected.forEach(func(kE Key, vE mapStateEntry) bool {
if vO, ok := obtained.get(kE); ok {
if !(&vO).Equal(&vE) {
res += "- " + kE.String() + ": " + vE.String() + "\n"
res += "+ " + kE.String() + ": " + vO.String() + "\n"
}
} else {
res += "- " + kE.String() + ": " + vE.String() + "\n"
}
return true
})
obtained.forEach(func(kO Key, vO mapStateEntry) bool {
if _, ok := expected.get(kO); !ok {
res += "+ " + kO.String() + ": " + vO.String() + "\n"
}
return true
})
return res
}
func (ms mapState) String() (res string) {
ms.forEach(func(kO Key, vO mapStateEntry) bool {
res += kO.String() + ": " + vO.String() + "\n"
return true
})
return res
}
// Equal returns true if two entries are equal.
// This is used for testing only via mapState.Equal and mapState.Diff.
func (e *mapStateEntry) Equal(o *mapStateEntry) bool {
if e == nil || o == nil {
return e == o
}
return e.MapStateEntry == o.MapStateEntry && e.derivedFromRules == o.derivedFromRules
}
// String returns a string representation of the MapStateEntry
func (e mapStateEntry) String() string {
return e.MapStateEntry.String() + ",derivedFromRules=" + e.derivedFromRules.Value()
}
// addKeyWithChanges adds 'key' with value 'entry' to the map state, keeping track of incremental
// changes in 'changes.Adds' and 'changes.Deletes', and of any changed or removed old values in
// 'changes.old', if not nil.
func (ms *mapState) addKeyWithChanges(key Key, entry mapStateEntry, changes ChangeState) bool {
var datapathEqual bool
oldEntry, exists := ms.get(key)
// Only merge if both old and new are allows or denies
if exists && oldEntry.IsDeny() == entry.IsDeny() {
// Do nothing if entries are equal
if entry.Equal(&oldEntry) {
return false // nothing to do
}
// Save old value before any changes, if desired
changes.insertOldIfNotExists(key, oldEntry)
// Compare for datapath equalness before merging, as the old entry is updated in
// place!
datapathEqual = oldEntry.MapStateEntry == entry.MapStateEntry
oldEntry.MapStateEntry.Merge(entry.MapStateEntry)
if entry.derivedFromRules.Value() != "" {
oldEntry.derivedFromRules.Merge(entry.derivedFromRules)
}
ms.updateExisting(key, oldEntry)
} else if !exists || entry.IsDeny() {
// Insert a new entry if one did not exist or a deny entry is overwriting an allow
// entry
// Save old value before any changes, if any
if exists {
changes.insertOldIfNotExists(key, oldEntry)
}
// Callers already have cloned the containers, no need to do it again here
ms.insert(key, entry)
} else {
// Do not record an incremental add if nothing was done
return false
}
// Record an incremental Add if desired and entry is new or changed
if changes.Adds != nil && (!exists || !datapathEqual) {
changes.Adds[key] = struct{}{}
// Key add overrides any previous delete of the same key
if changes.Deletes != nil {
delete(changes.Deletes, key)
}
}
return true
}
// deleteKeyWithChanges deletes a 'key' from 'ms' keeping track of incremental changes in 'changes'
func (ms *mapState) deleteKeyWithChanges(key Key, changes ChangeState) {
if entry, exists := ms.get(key); exists {
// Save old value before any changes, if desired
changes.insertOldIfNotExists(key, entry)
if changes.Deletes != nil {
changes.Deletes[key] = struct{}{}
// Remove a potential previously added key
if changes.Adds != nil {
delete(changes.Adds, key)
}
}
ms.delete(key)
}
}
// revertChanges undoes changes to 'ms' as indicated by 'changes.Adds' and 'changes.old' collected
// via insertWithChanges().
func (ms *mapState) revertChanges(changes ChangeState) {
for k := range changes.Adds {
ms.delete(k)
}
// 'old' contains all the original values of both modified and deleted entries
for k, v := range changes.old {
ms.insert(k, v)
}
}
// insertWithChanges contains the most important business logic for policy insertions. It inserts a
// key and entry into the map only if not covered by a deny entry.
//
// Whenever the bpf datapath finds both L4-only and L3/L4 matching policy entries for a given
// packet, it uses the following logic to choose the policy entry:
// 1. Deny is selected, if any
// 2. Among two allows the one with higher proxy port priority is selected
// 3. Otherwise, the L4-only entry is chosen if it has more specific port/proto than
// the L3/L4 entry
// 4. Otherwise the L3/L4 entry is chosen
//
// This selects the higher precedence rule either by its deny status or by its more specific
// L4, and otherwise prefers the L3/L4 entry. This means that it suffices to manage deny
// precedence among the keys with the same ID here; the datapath takes care of the precedence
// between different IDs (that is, between a specific ID and the wildcard ID (==0)).
//
// Note on bailed or deleted entries:
//
// It might seem that when we bail out due to being covered by an existing entry, or delete an
// entry due to being covered by the new one, we would want this action reversed if the existing
// entry or this new one is incrementally removed, respectively.
//
// Consider these facts:
// 1. Whenever a key covers another, the covering key has broader or equal
// protocol/port, and the keys have the same identity, or the covering key has wildcard identity
// (ID == 0).
// 2. Only keys with a specific identity (ID != 0) can be incrementally added or deleted.
// 3. Due to the selector cache being transactional, when an identity is removed, all keys
// with that identity are incrementally deleted.
//
// Hence, if a covering key is incrementally deleted, it is a key with a specific identity (2), and
// all keys covered by it will be deleted as well (3), so there is no situation where this
// bailed-out or deleted key should be reinstated due to the covering key being incrementally
// deleted.
//
// Incremental changes performed are recorded in 'changes'.
func (ms *mapState) insertWithChanges(newKey Key, newEntry mapStateEntry, features policyFeatures, changes ChangeState) {
if newEntry.IsDeny() {
// Bail if covered by another (different) deny key
for k, v := range ms.BroaderOrEqualKeys(newKey) {
if v.IsDeny() && k != newKey {
return
}
}
// Delete covered allows and denies with a different key
for k, v := range ms.NarrowerOrEqualKeys(newKey) {
if !v.IsDeny() || k != newKey {
ms.deleteKeyWithChanges(k, changes)
}
}
} else {
// Bail if covered by a deny key or a key with a higher proxy port priority.
//
// This can be skipped if no rules have denies or proxy redirects
if features.contains(denyRules | redirectRules) {
for _, v := range ms.BroaderOrEqualKeys(newKey) {
if v.IsDeny() || v.ProxyPortPriority > newEntry.ProxyPortPriority {
return
}
}
}
// Delete covered allow entries with lower proxy port priority.
//
// This can be skipped if no rules have proxy redirects
if features.contains(redirectRules) {
for k, v := range ms.NarrowerOrEqualKeys(newKey) {
if !v.IsDeny() && v.ProxyPortPriority < newEntry.ProxyPortPriority {
ms.deleteKeyWithChanges(k, changes)
}
}
}
// Checking for auth feature here is faster than calling 'authPreferredInsert' and
// checking for it there.
if features.contains(authRules) {
ms.authPreferredInsert(newKey, newEntry, changes)
return
}
}
ms.addKeyWithChanges(newKey, newEntry, changes)
}
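// exampleInsertWithChanges is an illustrative sketch (not part of the original
// code) of the covering logic above: an L3-only deny for identity 100 covers a
// narrower TCP/80 allow for the same identity, so the allow is deleted and the
// deletion is recorded in 'changes'. Keys, entries and features follow the
// helpers defined in this file; the identity and port are arbitrary.
func exampleInsertWithChanges() ChangeState {
	ms := newMapState(0)
	changes := ChangeState{
		Adds:    make(Keys),
		Deletes: make(Keys),
		old:     make(mapStateMap),
	}
	allowKey := IngressKey().WithIdentity(100).WithPortProto(6, 80)
	ms.insert(allowKey, newMapStateEntry(NilRuleOrigin, 0, 0, false, NoAuthRequirement))
	denyKey := IngressKey().WithIdentity(100) // L3-only, covers allowKey
	ms.insertWithChanges(denyKey, newMapStateEntry(NilRuleOrigin, 0, 0, true, NoAuthRequirement), allFeatures, changes)
	// changes.Deletes now contains allowKey, and changes.old holds its old entry.
	return changes
}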
// overrideAuthRequirement overrides the AuthRequirement of 'v' with a requirement derived from
// 'newEntry' if their auth types differ, saving the old entry in 'changes'.
func (ms *mapState) overrideAuthRequirement(newEntry mapStateEntry, k Key, v mapStateEntry, changes ChangeState) {
if v.AuthRequirement.AuthType() != newEntry.AuthRequirement.AuthType() {
// Save the old value first
changes.insertOldIfNotExists(k, v)
// Auth type can be changed in-place, trie is not affected
// Only derived auth type is ever overridden, so the explicit flag is not copied
v.AuthRequirement = newEntry.AuthRequirement.AsDerived()
ms.entries[k] = v
}
}
// authPreferredInsert applies AuthRequirement of a more generic entry to more specific entries, if
// not explicitly specified.
//
// This function is expected to be called for a map insertion after deny
// entry evaluation. If there is a covering map key for 'newKey'
// which denies traffic matching 'newKey', then this function should not be called.
func (ms *mapState) authPreferredInsert(newKey Key, newEntry mapStateEntry, changes ChangeState) {
if !newEntry.AuthRequirement.IsExplicit() {
// New entry has a default auth type.
// Fill in the AuthType from the most specific covering key with the same ID and an
// explicit auth type
for _, v := range ms.CoveringKeysWithSameID(newKey) {
if v.AuthRequirement.IsExplicit() {
// AuthType from the most specific covering key is applied
// to 'newEntry'
newEntry.AuthRequirement = v.AuthRequirement.AsDerived()
break
}
}
} else { // New entry has an explicit auth type
// Check if the new key is the most specific covering key of any other key
// with the same ID and default auth type, and propagate the auth type from the new
// entry to such entries.
for k, v := range ms.SubsetKeysWithSameID(newKey) {
if v.IsDeny() || v.AuthRequirement.IsExplicit() {
// Stop if a subset entry is deny or has an explicit auth type, as
// that is the more specific covering key for all remaining subset
// keys
break
}
ms.overrideAuthRequirement(newEntry, k, v, changes)
}
}
ms.addKeyWithChanges(newKey, newEntry, changes)
}
// insertOldIfNotExists only inserts an entry in 'changes.old' if 'key' does not already exist there
// and 'key' does not already exist in 'changes.Adds'. This prevents recording "old" values for
// newly added keys. When an entry is updated, we are called before the key is added to
// 'changes.Adds' so we'll record the old value as expected.
// Returns 'true' if an old entry was added.
func (changes *ChangeState) insertOldIfNotExists(key Key, entry mapStateEntry) bool {
if changes == nil || changes.old == nil {
return false
}
if _, exists := changes.old[key]; !exists {
// Only insert the old entry if the entry was not first added on this round of
// changes.
if _, added := changes.Adds[key]; !added {
changes.old[key] = entry
return true
}
}
return false
}
// determineAllowLocalhostIngress determines whether communication should be allowed
// from the localhost. It inserts the Key corresponding to the localhost into the
// map state if the localhost is allowed to communicate with the
// endpoint. Authentication for localhost traffic is not required.
func (ms *mapState) determineAllowLocalhostIngress() {
if option.Config.AlwaysAllowLocalhost() {
entry := newAllowEntryWithLabels(LabelsLocalHostIngress)
ms.insertWithChanges(localHostKey, entry, allFeatures, ChangeState{})
}
}
// allowAllIdentities adds the wildcard keys allowing all identities at L3 in the
// specified directions (ingress, egress).
// Note that this is used when policy is not enforced, so authentication is explicitly not required.
func (ms *mapState) allowAllIdentities(ingress, egress bool) {
if ingress {
ms.upsert(allKey[trafficdirection.Ingress], newAllowEntryWithLabels(LabelsAllowAnyIngress))
}
if egress {
ms.upsert(allKey[trafficdirection.Egress], newAllowEntryWithLabels(LabelsAllowAnyEgress))
}
}
// MapChanges collects updates to the endpoint policy at the
// granularity of individual mapstate key-value pairs for both adds
// and deletes. 'mutex' must be held for any access.
type MapChanges struct {
firstVersion versioned.KeepVersion
mutex lock.Mutex
changes []mapChange
synced []mapChange
version *versioned.VersionHandle
}
type mapChange struct {
Add bool // false deletes
Key Key
Value mapStateEntry
}
type MapChange struct {
Add bool // false deletes
Key Key
Value MapStateEntry
}
// AccumulateMapChanges accumulates the given changes to the
// MapChanges.
//
// The caller is responsible for making sure the same identity is not
// present in both 'adds' and 'deletes'.
func (mc *MapChanges) AccumulateMapChanges(adds, deletes []identity.NumericIdentity, keys []Key, value mapStateEntry) {
mc.mutex.Lock()
defer mc.mutex.Unlock()
for _, id := range adds {
for _, k := range keys {
k.Identity = id
mc.changes = append(mc.changes, mapChange{
Add: true,
Key: k,
Value: value,
})
}
}
for _, id := range deletes {
for _, k := range keys {
k.Identity = id
mc.changes = append(mc.changes, mapChange{
Add: false,
Key: k,
Value: value,
})
}
}
}
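// exampleAccumulate is an illustrative sketch (not part of the original code):
// queuing an incremental allow for two newly added identities against a single
// key template; the identities, port and entry are arbitrary. The queued
// changes take effect only after SyncMapChanges and consumeMapChanges below.
func exampleAccumulate(mc *MapChanges) {
	adds := []identity.NumericIdentity{1001, 1002}
	keys := []Key{IngressKey().WithPortProto(6, 80)}
	mc.AccumulateMapChanges(adds, nil, keys, newMapStateEntry(NilRuleOrigin, 0, 0, false, NoAuthRequirement))
}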
// SyncMapChanges moves the current batch of changes to 'synced' to be consumed as a unit
func (mc *MapChanges) SyncMapChanges(txn *versioned.Tx) {
mc.mutex.Lock()
defer mc.mutex.Unlock()
if len(mc.changes) > 0 {
// Only apply changes after the initial version
if txn.After(mc.firstVersion) {
mc.synced = append(mc.synced, mc.changes...)
mc.version.Close()
mc.version = txn.GetVersionHandle()
log.WithFields(logrus.Fields{
logfields.NewVersion: mc.version,
}).Debug("SyncMapChanges: Got handle on the new version")
} else {
log.WithFields(logrus.Fields{
logfields.Version: mc.firstVersion,
logfields.OldVersion: txn,
}).Debug("SyncMapChanges: Discarding already applied changes")
}
}
mc.changes = nil
}
// detach releases any version handle we may hold
func (mc *MapChanges) detach() {
mc.mutex.Lock()
mc.version.Close()
mc.mutex.Unlock()
}
// consumeMapChanges transfers the incremental changes from MapChanges to the caller,
// while applying the changes to PolicyMapState.
func (mc *MapChanges) consumeMapChanges(p *EndpointPolicy, features policyFeatures) (*versioned.VersionHandle, ChangeState) {
mc.mutex.Lock()
defer mc.mutex.Unlock()
changes := ChangeState{
Adds: make(Keys, len(mc.synced)),
Deletes: make(Keys, len(mc.synced)),
old: make(mapStateMap, len(mc.synced)),
}
for i := range mc.synced {
key := mc.synced[i].Key
entry := mc.synced[i].Value
if mc.synced[i].Add {
// Insert the key and collect the incremental changes to the overall
// state in 'changes'
p.policyMapState.insertWithChanges(key, entry, features, changes)
} else {
// Delete the key and collect the incremental changes to the overall
// state in 'changes'
p.policyMapState.deleteKeyWithChanges(key, changes)
}
}
// move version to the caller
version := mc.version
mc.version = nil
mc.synced = nil
return version, changes
}
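// exampleSyncAndConsume is an illustrative sketch, not part of the original
// source: it chains the SyncMapChanges/consumeMapChanges lifecycle in the same
// way the tests exercise it. It assumes 'p' is a fully initialized
// EndpointPolicy and simply enables all policy features.
func exampleSyncAndConsume(mc *MapChanges, p *EndpointPolicy) ChangeState {
	// Move the accumulated changes into the 'synced' batch under the latest
	// version transaction.
	mc.SyncMapChanges(versioned.LatestTx)
	// Apply the synced batch to p.policyMapState, collecting the resulting
	// adds, deletes, and overwritten values.
	handle, changes := mc.consumeMapChanges(p, allFeatures)
	if handle != nil {
		// The version handle must be closed once the changes have been
		// pushed out; this sketch closes it immediately.
		handle.Close()
	}
	return changes
}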
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/cilium/cilium/pkg/container/versioned"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/policy/trafficdirection"
"github.com/cilium/cilium/pkg/policy/types"
"github.com/cilium/cilium/pkg/u8proto"
)
func (e mapStateEntry) withLabels(lbls labels.LabelArrayList) mapStateEntry {
e.derivedFromRules = makeRuleOrigin(lbls)
return e
}
// withExplicitAuth sets an explicit auth requirement
func (e mapStateEntry) withExplicitAuth(authType AuthType) mapStateEntry {
e.AuthRequirement = authType.AsExplicitRequirement()
return e
}
// withDerivedAuth sets a derived auth requirement
func (e mapStateEntry) withDerivedAuth(authType AuthType) mapStateEntry {
e.AuthRequirement = authType.AsDerivedRequirement()
return e
}
func (e mapStateEntry) WithProxyPort(proxyPort uint16) mapStateEntry {
e.MapStateEntry = e.MapStateEntry.WithProxyPort(proxyPort)
return e
}
func (ms mapState) withState(initMap mapStateMap) mapState {
for k, v := range initMap {
ms.insert(k, v)
}
return ms
}
func ingressKey(identity identity.NumericIdentity, proto u8proto.U8proto, port uint16, prefixLen uint8) Key {
return IngressKey().WithIdentity(identity).WithPortProtoPrefix(proto, port, prefixLen)
}
func ingressL3OnlyKey(identity identity.NumericIdentity) Key {
return IngressKey().WithIdentity(identity)
}
func egressKey(identity identity.NumericIdentity, proto u8proto.U8proto, port uint16, prefixLen uint8) Key {
return EgressKey().WithIdentity(identity).WithPortProtoPrefix(proto, port, prefixLen)
}
func egressL3OnlyKey(identity identity.NumericIdentity) Key {
return EgressKey().WithIdentity(identity)
}
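// examplePortRangeKey is an illustrative sketch, not one of the original test
// helpers: it spells out the "64/10" convention used in the cases below. With
// a 16-bit destination port, a prefix length of 10 leaves the low 6 bits
// wildcarded, so port 64 with prefix length 10 covers TCP ports 64-127.
func examplePortRangeKey(id identity.NumericIdentity) Key {
	return ingressKey(id, 6, 64, 10) // TCP (proto 6), ports 64-127
}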
func TestPolicyKeyTrafficDirection(t *testing.T) {
k := IngressKey()
require.True(t, k.IsIngress())
require.False(t, k.IsEgress())
k = EgressKey()
require.False(t, k.IsIngress())
require.True(t, k.IsEgress())
}
// validatePortProto makes sure each Key in MapState abides by the contract that protocol/nexthdr
// can only be wildcarded if the destination port is also wildcarded.
func (ms *mapState) validatePortProto(t *testing.T) {
ms.forEach(func(k Key, _ mapStateEntry) bool {
if k.Nexthdr == 0 {
require.Equal(t, uint16(0), k.DestPort)
}
return true
})
}
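// exampleWildcardProtoKey is an illustrative sketch, not one of the original
// test helpers: it shows a key that satisfies the contract checked by
// validatePortProto above: when the protocol is wildcarded (Nexthdr == 0),
// the destination port must be wildcarded as well.
func exampleWildcardProtoKey(id identity.NumericIdentity) Key {
	return ingressKey(id, 0, 0, 0) // any protocol, any port
}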
func (e mapStateEntry) withProxyPort(proxyPort uint16) mapStateEntry {
e.MapStateEntry = e.MapStateEntry.WithProxyPort(proxyPort)
return e
}
func (e mapStateEntry) withProxyPortPriority(proxyPort uint16, priority uint8) mapStateEntry {
e.MapStateEntry = e.MapStateEntry.WithProxyPort(proxyPort).WithProxyPriority(priority)
return e
}
func TestMapState_insertWithChanges(t *testing.T) {
allowEntry := NewMapStateEntry(AllowEntry).withLabels(labels.LabelArrayList{nil})
denyEntry := NewMapStateEntry(DenyEntry).withLabels(labels.LabelArrayList{nil})
type args struct {
key Key
entry MapStateEntry
}
tests := []struct {
name string
ms, want mapState
wantAdds, wantDeletes Keys
wantOld mapStateMap
args args
}{
{
name: "test-1 - no KV added, map should remain the same",
ms: testMapState(mapStateMap{
IngressKey(): allowEntry,
}),
args: args{
key: IngressKey(),
entry: AllowEntry,
},
want: testMapState(mapStateMap{
IngressKey(): allowEntry,
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-2a - L3 allow KV should not overwrite deny entry",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 80, 0): denyEntry,
}),
args: args{
key: ingressL3OnlyKey(1),
entry: AllowEntry,
},
want: testMapState(mapStateMap{
ingressL3OnlyKey(1): allowEntry,
ingressKey(1, 3, 80, 0): denyEntry,
}),
wantAdds: Keys{
ingressL3OnlyKey(1): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-2b - L3 port-range allow KV should not overwrite deny entry",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 80, 0): denyEntry,
}),
args: args{
key: ingressKey(1, 3, 64, 10), // port range 64-127 (64/10)
entry: AllowEntry,
},
want: testMapState(mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
ingressKey(1, 3, 80, 0): denyEntry,
}),
wantAdds: Keys{
ingressKey(1, 3, 64, 10): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-3a - L3-L4 allow KV should not overwrite deny entry",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 80, 0): denyEntry,
}),
args: args{
key: ingressKey(1, 3, 80, 0),
entry: AllowEntry,
},
want: testMapState(mapStateMap{
ingressKey(1, 3, 80, 0): denyEntry,
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-3b - L3-L4 port-range allow KV should not overwrite deny entry",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 64, 10): denyEntry, // port range 64-127 (64/10)
}),
args: args{
key: ingressKey(1, 3, 64, 10), // port range 64-127 (64/10)
entry: AllowEntry,
},
want: testMapState(mapStateMap{
ingressKey(1, 3, 64, 10): denyEntry, // port range 64-127 (64/10)
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-4a - L3-L4 deny KV should overwrite allow entry",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry,
}),
args: args{
key: ingressKey(1, 3, 80, 0),
entry: DenyEntry,
},
want: testMapState(mapStateMap{
ingressKey(1, 3, 80, 0): denyEntry,
}),
wantAdds: Keys{
ingressKey(1, 3, 80, 0): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry,
},
},
{
name: "test-4b - L3-L4 port-range deny KV should overwrite allow entry",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry,
}),
args: args{
key: ingressKey(1, 3, 64, 10), // port range 64-127 (64/10)
entry: DenyEntry,
},
want: testMapState(mapStateMap{
ingressKey(1, 3, 64, 10): denyEntry, // port range 64-127 (64/10)
}),
wantAdds: Keys{
ingressKey(1, 3, 64, 10): struct{}{},
},
wantDeletes: Keys{
ingressKey(1, 3, 80, 0): struct{}{},
},
wantOld: mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry,
},
},
{
name: "test-5a - L3 deny KV should overwrite all L3-L4 allow and L3 allow entries for the same L3",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry,
ingressL3OnlyKey(1): allowEntry,
ingressKey(2, 3, 80, 0): allowEntry,
ingressL3OnlyKey(2): allowEntry,
}),
args: args{
key: ingressL3OnlyKey(1),
entry: DenyEntry,
},
want: testMapState(mapStateMap{
ingressL3OnlyKey(1): denyEntry,
ingressKey(2, 3, 80, 0): allowEntry,
ingressL3OnlyKey(2): allowEntry,
}),
wantAdds: Keys{
ingressL3OnlyKey(1): struct{}{},
},
wantDeletes: Keys{
ingressKey(1, 3, 80, 0): struct{}{},
},
wantOld: mapStateMap{
ingressL3OnlyKey(1): allowEntry,
ingressKey(1, 3, 80, 0): allowEntry,
},
},
{
name: "test-5b - L3 port-range deny KV should overwrite all L3-L4 allow and L3 allow entries for the same L3",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry,
ingressKey(1, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
ingressKey(2, 3, 80, 0): allowEntry,
ingressKey(2, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
}),
args: args{
key: ingressKey(1, 3, 64, 10), // port range 64-127 (64/10)
entry: DenyEntry,
},
want: testMapState(mapStateMap{
ingressKey(1, 3, 64, 10): denyEntry, // port range 64-127 (64/10)
ingressKey(2, 3, 80, 0): allowEntry,
ingressKey(2, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
}),
wantAdds: Keys{
ingressKey(1, 3, 64, 10): struct{}{},
},
wantDeletes: Keys{
ingressKey(1, 3, 80, 0): struct{}{},
},
wantOld: mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
ingressKey(1, 3, 80, 0): allowEntry,
},
},
{
name: "test-6a - L3 egress deny KV should not overwrite any existing ingress allow",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry,
ingressL3OnlyKey(1): allowEntry,
ingressKey(2, 3, 80, 0): allowEntry,
ingressL3OnlyKey(2): allowEntry,
}),
args: args{
key: egressL3OnlyKey(1),
entry: DenyEntry,
},
want: testMapState(mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry,
ingressL3OnlyKey(1): allowEntry,
egressL3OnlyKey(1): denyEntry,
ingressKey(2, 3, 80, 0): allowEntry,
ingressL3OnlyKey(2): allowEntry,
}),
wantAdds: Keys{
egressL3OnlyKey(1): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-6b - L3 egress port-range deny KV should not overwrite any existing ingress allow",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry,
ingressKey(1, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
ingressKey(2, 3, 80, 0): allowEntry,
ingressKey(2, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
}),
args: args{
key: egressKey(1, 3, 64, 10), // port range 64-127 (64/10)
entry: DenyEntry,
},
want: testMapState(mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry,
ingressKey(1, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
egressKey(1, 3, 64, 10): denyEntry, // port range 64-127 (64/10)
ingressKey(2, 3, 80, 0): allowEntry,
ingressKey(2, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
}),
wantAdds: Keys{
egressKey(1, 3, 64, 10): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-7a - L3 ingress deny KV should not be overwritten by a L3-L4 ingress allow",
ms: testMapState(mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
args: args{
key: ingressKey(1, 3, 80, 0),
entry: AllowEntry,
},
want: testMapState(mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-7b - L3 ingress deny KV should not be overwritten by a L3-L4 port-range ingress allow",
ms: testMapState(mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
args: args{
key: ingressKey(1, 3, 64, 10), // port range 64-127 (64/10)
entry: AllowEntry,
},
want: testMapState(mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-8a - L3 ingress deny KV should not be overwritten by a L3-L4-L7 ingress allow",
ms: testMapState(mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
args: args{
key: ingressKey(1, 3, 80, 0),
entry: AllowEntry.WithProxyPort(8080),
},
want: testMapState(mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-8b - L3 ingress deny KV should not be overwritten by a L3-L4-L7 port-range ingress allow",
ms: testMapState(mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
args: args{
key: ingressKey(1, 3, 64, 10), // port range 64-127 (64/10)
entry: AllowEntry.WithProxyPort(8080),
},
want: testMapState(mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-9a - L3 ingress deny KV should overwrite a L3-L4-L7 ingress allow",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withProxyPort(8080),
}),
args: args{
key: ingressL3OnlyKey(1),
entry: DenyEntry,
},
want: testMapState(mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
wantAdds: Keys{
ingressL3OnlyKey(1): struct{}{},
},
wantDeletes: Keys{
ingressKey(1, 3, 80, 0): struct{}{},
},
wantOld: mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withProxyPort(8080),
},
},
{
name: "test-9b - L3 ingress deny KV should overwrite a L3-L4-L7 port-range ingress allow",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withProxyPort(8080), // port range 64-127 (64/10)
}),
args: args{
key: ingressL3OnlyKey(1),
entry: DenyEntry,
},
want: testMapState(mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
wantAdds: Keys{
ingressL3OnlyKey(1): struct{}{},
},
wantDeletes: Keys{
ingressKey(1, 3, 64, 10): struct{}{},
},
wantOld: mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withProxyPort(8080), // port range 64-127 (64/10)
},
},
{
name: "test-10a - L3 ingress deny KV should overwrite a L3-L4-L7 ingress allow and a L3-L4 deny",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withProxyPort(8080),
ingressKey(1, 3, 80, 0): denyEntry,
}),
args: args{
key: ingressL3OnlyKey(1),
entry: DenyEntry,
},
want: testMapState(mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
wantAdds: Keys{
ingressL3OnlyKey(1): struct{}{},
},
wantDeletes: Keys{
ingressKey(1, 3, 80, 0): struct{}{},
ingressKey(1, 3, 80, 0): struct{}{},
},
wantOld: mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withProxyPort(8080),
ingressKey(1, 3, 80, 0): denyEntry,
},
},
{
name: "test-10b - L3 ingress deny KV should overwrite a L3-L4-L7 port-range ingress allow and a L3-L4 port-range deny",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withProxyPort(8080), // port range 64-127 (64/10)
ingressKey(1, 3, 64, 10): denyEntry, // port range 64-127 (64/10)
}),
args: args{
key: ingressL3OnlyKey(1),
entry: DenyEntry,
},
want: testMapState(mapStateMap{
ingressL3OnlyKey(1): denyEntry,
}),
wantAdds: Keys{
ingressL3OnlyKey(1): struct{}{},
},
wantDeletes: Keys{
ingressKey(1, 3, 64, 10): struct{}{},
ingressKey(1, 3, 64, 10): struct{}{},
},
wantOld: mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withProxyPort(8080), // port range 64-127 (64/10)
ingressKey(1, 3, 64, 10): denyEntry, // port range 64-127 (64/10)
},
},
{
name: "test-11a - L3 ingress allow should not be allowed if there is a L3 'all' deny",
ms: testMapState(mapStateMap{
egressKey(1, 3, 80, 0): allowEntry.withProxyPort(8080),
IngressKey(): denyEntry,
}),
args: args{
key: ingressL3OnlyKey(100),
entry: AllowEntry,
},
want: testMapState(mapStateMap{
egressKey(1, 3, 80, 0): allowEntry.withProxyPort(8080),
IngressKey(): denyEntry,
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-11b - L3 ingress allow should not be allowed if there is a L3 'all' deny",
ms: testMapState(mapStateMap{
egressKey(1, 3, 64, 10): allowEntry.withProxyPort(8080), // port range 64-127 (64/10)
IngressKey(): denyEntry,
}),
args: args{
key: ingressKey(100, 0, 0, 0),
entry: AllowEntry,
},
want: testMapState(mapStateMap{
egressKey(1, 3, 64, 10): allowEntry.withProxyPort(8080), // port range 64-127 (64/10)
IngressKey(): denyEntry,
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-12a - inserting a L3 'all' deny should delete all entries for that direction",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withProxyPort(8080),
ingressKey(1, 3, 5, 0): allowEntry.withProxyPort(8080),
egressKey(100, 3, 5, 0): allowEntry.withProxyPort(8080),
}),
args: args{
key: IngressKey(),
entry: DenyEntry,
},
want: testMapState(mapStateMap{
IngressKey(): denyEntry,
egressKey(100, 3, 5, 0): allowEntry.withProxyPort(8080),
}),
wantAdds: Keys{
IngressKey(): struct{}{},
},
wantDeletes: Keys{
ingressKey(1, 3, 80, 0): struct{}{},
ingressKey(1, 3, 5, 0): struct{}{},
},
wantOld: mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withProxyPort(8080),
ingressKey(1, 3, 5, 0): allowEntry.withProxyPort(8080),
},
},
{
name: "test-12b - inserting a L3 'all' deny should delete all entries for that direction (including port ranges)",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withProxyPort(8080), // port range 64-127 (64/10)
ingressKey(1, 3, 4, 14): allowEntry.withProxyPort(8080),
egressKey(100, 3, 4, 14): allowEntry.withProxyPort(8080),
}),
args: args{
key: IngressKey(),
entry: DenyEntry,
},
want: testMapState(mapStateMap{
IngressKey(): denyEntry,
egressKey(100, 3, 4, 14): allowEntry.withProxyPort(8080),
}),
wantAdds: Keys{
IngressKey(): struct{}{},
},
wantDeletes: Keys{
ingressKey(1, 3, 64, 10): struct{}{},
ingressKey(1, 3, 4, 14): struct{}{},
},
wantOld: mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withProxyPort(8080), // port range 64-127 (64/10)
ingressKey(1, 3, 4, 14): allowEntry.withProxyPort(8080),
},
},
{
name: "test-13a - L3-L4-L7 ingress allow should overwrite a L3-L4-L7 ingress allow due to lower priority",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withProxyPort(8080),
}),
args: args{
key: ingressKey(1, 3, 80, 0),
entry: AllowEntry.WithProxyPort(9090).WithProxyPriority(1),
},
want: testMapState(mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withProxyPortPriority(9090, 1),
}),
wantAdds: Keys{
ingressKey(1, 3, 80, 0): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withProxyPort(8080),
},
},
{
name: "test-13b - L3-L4-L7 port-range ingress allow should overwrite a L3-L4-L7 port-range ingress allow due to lower priority",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withProxyPort(8080),
}),
args: args{
key: ingressKey(1, 3, 64, 10),
entry: AllowEntry.WithProxyPort(9090).WithProxyPriority(1),
},
want: testMapState(mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withProxyPortPriority(9090, 1),
}),
wantAdds: Keys{
ingressKey(1, 3, 64, 10): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withProxyPort(8080),
},
},
{
name: "test-14a - L3-L4-L7 ingress allow should overwrite a L3-L4-L7 ingress allow due to higher priority on the same port",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withProxyPort(8080),
}),
args: args{
key: ingressKey(1, 3, 80, 0),
entry: AllowEntry.WithProxyPort(8080).WithProxyPriority(1),
},
want: testMapState(mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withProxyPortPriority(8080, 1),
}),
wantAdds: Keys{
ingressKey(1, 3, 80, 0): struct{}{}, // precedence changed
},
wantDeletes: Keys{},
wantOld: mapStateMap{
ingressKey(1, 3, 80, 0): allowEntry.withProxyPort(8080),
},
},
{
name: "test-14b - L3-L4-L7 port-range ingress allow should overwrite a L3-L4-L7 port-range ingress allow due to higher priority on the same port",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withProxyPort(8080),
}),
args: args{
key: ingressKey(1, 3, 64, 10),
entry: AllowEntry.WithProxyPort(8080).WithProxyPriority(1),
},
want: testMapState(mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withProxyPortPriority(8080, 1),
}),
wantAdds: Keys{
ingressKey(1, 3, 64, 10): struct{}{}, // precedence changed
},
wantDeletes: Keys{},
wantOld: mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withProxyPort(8080),
},
},
{
name: "test-14c - L3-L4 ingress allow should not overwrite a L3-L4-L7 port-range ingress allow on overlapping port",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withProxyPort(8080),
}),
args: args{
key: ingressKey(1, 3, 80, 16),
entry: AllowEntry,
},
want: testMapState(mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry.withProxyPort(8080),
}),
wantAdds: Keys{},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-15a - L3 port-range allow KV should not overwrite a wildcard deny entry",
ms: testMapState(mapStateMap{
ingressKey(0, 3, 80, 0): denyEntry,
}),
args: args{
key: ingressKey(1, 3, 64, 10), // port range 64-127 (64/10)
entry: AllowEntry,
},
want: testMapState(mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
ingressKey(0, 3, 80, 0): denyEntry,
}),
wantAdds: Keys{
ingressKey(1, 3, 64, 10): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-15b-reverse - L3 port-range allow KV should not overwrite a wildcard deny entry",
ms: testMapState(mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
}),
args: args{
key: ingressKey(0, 3, 80, 0),
entry: DenyEntry,
},
want: testMapState(mapStateMap{
ingressKey(1, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
ingressKey(0, 3, 80, 0): denyEntry,
}),
wantAdds: Keys{
ingressKey(0, 3, 80, 16): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
{
name: "test-16a - No added entry for L3 port-range allow + wildcard allow entry",
ms: testMapState(mapStateMap{
ingressKey(0, 3, 80, 0): allowEntry.withProxyPort(8080),
}),
args: args{
key: ingressKey(1, 3, 64, 10), // port range 64-127 (64/10)
entry: AllowEntry,
},
want: testMapState(mapStateMap{
ingressKey(0, 3, 80, 0): allowEntry.withProxyPort(8080),
ingressKey(1, 3, 64, 10): allowEntry, // port range 64-127 (64/10)
}),
wantAdds: Keys{
ingressKey(1, 3, 64, 10): struct{}{},
},
wantDeletes: Keys{},
wantOld: mapStateMap{},
},
}
for _, tt := range tests {
t.Log(tt.name)
changes := ChangeState{
Adds: make(Keys),
Deletes: make(Keys),
old: make(mapStateMap),
}
// copy the starting point
ms := testMapState(make(mapStateMap, tt.ms.Len()))
tt.ms.forEach(func(k Key, v mapStateEntry) bool {
ms.insert(k, v)
return true
})
entry := NewMapStateEntry(tt.args.entry).withLabels(labels.LabelArrayList{nil})
ms.insertWithChanges(tt.args.key, entry, denyRules, changes)
ms.validatePortProto(t)
require.Truef(t, ms.Equal(&tt.want), "%s: MapState mismatch:\n%s", tt.name, ms.diff(&tt.want))
require.EqualValuesf(t, tt.wantAdds, changes.Adds, "%s: Adds mismatch", tt.name)
require.EqualValuesf(t, tt.wantDeletes, changes.Deletes, "%s: Deletes mismatch", tt.name)
require.EqualValuesf(t, tt.wantOld, changes.old, "%s: OldValues mismatch allows", tt.name)
// Revert changes and check that we get the original mapstate
ms.revertChanges(changes)
require.Truef(t, ms.Equal(&tt.ms), "%s: MapState mismatch:\n%s", tt.name, ms.diff(&tt.ms))
}
}
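// exampleInsertAndRevert is an illustrative sketch, not part of the original
// tests: it shows the insertWithChanges/revertChanges round trip that the
// table-driven test above relies on. A deny entry for a hypothetical identity
// 1 is inserted while recording the changes, and reverting those changes
// restores the original map state.
func exampleInsertAndRevert(ms mapState) {
	changes := ChangeState{
		Adds:    make(Keys),
		Deletes: make(Keys),
		old:     make(mapStateMap),
	}
	entry := NewMapStateEntry(DenyEntry).withLabels(labels.LabelArrayList{nil})
	ms.insertWithChanges(ingressL3OnlyKey(1), entry, denyRules, changes)
	// Reverting restores the recorded old values and removes the added keys.
	ms.revertChanges(changes)
}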
func DNSUDPEgressKey(id identity.NumericIdentity) Key {
return EgressKey().WithIdentity(id).WithUDPPort(53)
}
func DNSTCPEgressKey(id identity.NumericIdentity) Key {
return EgressKey().WithIdentity(id).WithTCPPort(53)
}
func HostIngressKey() Key {
return IngressKey().WithIdentity(identity.ReservedIdentityHost)
}
func AnyIngressKey() Key {
return IngressKey()
}
func AnyEgressKey() Key {
return EgressKey()
}
func HttpIngressKey(id identity.NumericIdentity) Key {
return IngressKey().WithIdentity(id).WithTCPPort(80)
}
func HttpEgressKey(id identity.NumericIdentity) Key {
return EgressKey().WithIdentity(id).WithTCPPort(80)
}
func TcpEgressKey(id identity.NumericIdentity) Key {
return EgressKey().WithIdentity(id).WithTCPPort(0)
}
func allowEntry() mapStateEntry {
return NewMapStateEntry(AllowEntry).withLabels(labels.LabelArrayList{nil})
}
func proxyEntry(proxyPort uint16) mapStateEntry {
return NewMapStateEntry(AllowEntry.WithProxyPort(proxyPort)).withLabels(labels.LabelArrayList{nil})
}
func denyEntry() mapStateEntry {
return NewMapStateEntry(DenyEntry).withLabels(labels.LabelArrayList{nil})
}
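// exampleSetupState is an illustrative sketch, not part of the original tests:
// it combines the helpers above into a starting map state like the ones used
// in the deny cases below: an allow-all ingress, an L3-only deny for a
// hypothetical identity 41, and an L4-only HTTP redirect to a hypothetical
// proxy port 12345.
func exampleSetupState() mapState {
	return testMapState(mapStateMap{
		AnyIngressKey():      allowEntry(),
		ingressL3OnlyKey(41): denyEntry(),
		HttpIngressKey(0):    proxyEntry(12345),
	})
}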
func TestMapState_AccumulateMapChangesDeny(t *testing.T) {
csFoo := newTestCachedSelector("Foo", false)
csBar := newTestCachedSelector("Bar", false)
identityCache := identity.IdentityMap{
identity.NumericIdentity(identityFoo): labelsFoo,
}
selectorCache := testNewSelectorCache(identityCache)
type args struct {
cs *testCachedSelector
adds []int
deletes []int
port uint16
proto u8proto.U8proto
ingress bool
redirect bool
deny bool
}
tests := []struct {
continued bool // Start from the end state of the previous test
name string
setup mapState
args []args // changes applied, in order
state mapState
adds Keys
deletes Keys
}{{
name: "test-0 - Adding L4-only redirect allow key to an existing allow-all with L3-only deny",
setup: testMapState(mapStateMap{
AnyIngressKey(): allowEntry(),
ingressL3OnlyKey(41): denyEntry(),
}),
args: []args{
{cs: csFoo, adds: []int{0}, deletes: []int{}, port: 80, proto: 6, ingress: true, redirect: true, deny: false},
},
state: testMapState(mapStateMap{
AnyIngressKey(): allowEntry(),
ingressL3OnlyKey(41): denyEntry(),
HttpIngressKey(0): proxyEntry(1),
}),
adds: Keys{
HttpIngressKey(0): {},
},
deletes: Keys{},
}, {
name: "test-1a - Adding L3-deny to an existing allow-all with L4-only allow redirect map state entries",
setup: testMapState(mapStateMap{
AnyIngressKey(): allowEntry(),
HttpIngressKey(0): proxyEntry(12345),
}),
args: []args{
{cs: csFoo, adds: []int{41}, deletes: []int{}, port: 0, proto: 0, ingress: true, redirect: false, deny: true},
},
state: testMapState(mapStateMap{
AnyIngressKey(): allowEntry(),
ingressL3OnlyKey(41): denyEntry(),
HttpIngressKey(0): proxyEntry(12345),
}),
adds: Keys{
ingressL3OnlyKey(41): {},
},
deletes: Keys{},
}, {
continued: true,
name: "test-1b - Adding 2nd identity",
args: []args{
{cs: csFoo, adds: []int{42}, deletes: []int{}, port: 0, proto: 0, ingress: true, redirect: false, deny: true},
},
state: testMapState(mapStateMap{
AnyIngressKey(): allowEntry(),
ingressL3OnlyKey(41): denyEntry(),
ingressL3OnlyKey(42): denyEntry(),
HttpIngressKey(0): proxyEntry(12345),
}),
adds: Keys{
ingressL3OnlyKey(42): {},
},
deletes: Keys{},
}, {
continued: true,
name: "test-1c - Removing the same key",
args: []args{
{cs: csFoo, adds: nil, deletes: []int{42}, port: 0, proto: 0, ingress: true, redirect: false, deny: true},
},
state: testMapState(mapStateMap{
AnyIngressKey(): allowEntry(),
ingressL3OnlyKey(41): denyEntry(),
HttpIngressKey(0): proxyEntry(12345),
}),
adds: Keys{},
deletes: Keys{
ingressL3OnlyKey(42): {},
},
}, {
name: "test-2a - Adding 2 identities, and deleting a nonexisting key on an empty state",
args: []args{
{cs: csFoo, adds: []int{42, 43}, deletes: []int{50}, port: 80, proto: 6, ingress: true, redirect: false, deny: true},
},
state: testMapState(mapStateMap{
HttpIngressKey(42): denyEntry(),
HttpIngressKey(43): denyEntry(),
}),
adds: Keys{
HttpIngressKey(42): {},
HttpIngressKey(43): {},
},
deletes: Keys{},
}, {
continued: true,
name: "test-2b - Adding Bar also selecting 42 (and 44)",
args: []args{
{cs: csBar, adds: []int{42, 44}, deletes: []int{}, port: 80, proto: 6, ingress: true, redirect: false, deny: true},
},
state: testMapState(mapStateMap{
HttpIngressKey(42): denyEntry(),
HttpIngressKey(43): denyEntry(),
HttpIngressKey(44): denyEntry(),
}),
adds: Keys{
HttpIngressKey(44): {},
},
deletes: Keys{},
}, {
continued: true,
name: "test-2c - Deleting 42",
args: []args{
{cs: csFoo, adds: []int{}, deletes: []int{42}, port: 80, proto: 6, ingress: true, redirect: false, deny: true},
{cs: csBar, adds: []int{}, deletes: []int{42}, port: 80, proto: 6, ingress: true, redirect: false, deny: true},
},
state: testMapState(mapStateMap{
HttpIngressKey(43): denyEntry(),
HttpIngressKey(44): denyEntry(),
}),
adds: Keys{},
deletes: Keys{
HttpIngressKey(42): {},
},
}, {
continued: true,
name: "test-2d - Adding an entry that already exists, no adds",
args: []args{
{cs: csBar, adds: []int{44}, deletes: []int{}, port: 80, proto: 6, ingress: true, redirect: false, deny: true},
},
state: testMapState(mapStateMap{
HttpIngressKey(43): denyEntry(),
HttpIngressKey(44): denyEntry(),
}),
adds: Keys{},
deletes: Keys{},
}, {
continued: false,
name: "test-3a - egress allow with deny-L3",
setup: testMapState(mapStateMap{
AnyIngressKey(): allowEntry(),
HostIngressKey(): allowEntry(),
egressKey(42, 0, 0, 0): denyEntry(),
}),
args: []args{
{cs: csBar, adds: []int{42}, deletes: []int{}, port: 53, proto: 17, ingress: false, redirect: false, deny: false},
{cs: csBar, adds: []int{42}, deletes: []int{}, port: 53, proto: 6, ingress: false, redirect: false, deny: false},
},
state: testMapState(mapStateMap{
AnyIngressKey(): allowEntry(),
HostIngressKey(): allowEntry(),
egressKey(42, 0, 0, 0): denyEntry(),
}),
adds: Keys{},
deletes: Keys{},
}, {
continued: true,
name: "test-3b - egress allow DNS on another ID with deny-L3",
args: []args{
{cs: csBar, adds: []int{43}, deletes: []int{}, port: 53, proto: 17, ingress: false, redirect: false, deny: false},
{cs: csBar, adds: []int{43}, deletes: []int{}, port: 53, proto: 6, ingress: false, redirect: false, deny: false},
},
state: testMapState(mapStateMap{
AnyIngressKey(): allowEntry(),
HostIngressKey(): allowEntry(),
egressKey(42, 0, 0, 0): denyEntry(),
DNSUDPEgressKey(43): allowEntry(),
DNSTCPEgressKey(43): allowEntry(),
}),
adds: Keys{
DNSUDPEgressKey(43): {},
DNSTCPEgressKey(43): {},
},
deletes: Keys{},
}, {
continued: true,
name: "test-3c - egress allow HTTP proxy with deny-L3",
args: []args{
{cs: csFoo, adds: []int{43}, deletes: []int{}, port: 80, proto: 6, ingress: false, redirect: true, deny: false},
},
state: testMapState(mapStateMap{
AnyIngressKey(): allowEntry(),
HostIngressKey(): allowEntry(),
egressKey(42, 0, 0, 0): denyEntry(),
DNSUDPEgressKey(43): allowEntry(),
DNSTCPEgressKey(43): allowEntry(),
HttpEgressKey(43): proxyEntry(1),
}),
adds: Keys{
HttpEgressKey(43): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-4a - Add L7 skipped due to covering L3 deny",
setup: testMapState(mapStateMap{
AnyIngressKey(): allowEntry(),
HostIngressKey(): allowEntry(),
egressKey(42, 0, 0, 0): denyEntry(),
}),
args: []args{
{cs: csFoo, adds: []int{42}, deletes: []int{}, port: 80, proto: 6, ingress: false, redirect: true, deny: false},
},
state: testMapState(mapStateMap{
AnyIngressKey(): allowEntry(),
HostIngressKey(): allowEntry(),
egressKey(42, 0, 0, 0): denyEntry(),
}),
adds: Keys{},
deletes: Keys{},
}, {
continued: true,
name: "test-4b - Add & delete L7 skipped due to covering L3 deny",
args: []args{
{cs: csFoo, adds: []int{42}, deletes: []int{}, port: 80, proto: 6, ingress: false, redirect: true, deny: false},
{cs: csFoo, adds: []int{}, deletes: []int{42}, port: 80, proto: 6, ingress: false, redirect: true, deny: false},
},
state: testMapState(mapStateMap{
AnyIngressKey(): allowEntry(),
HostIngressKey(): allowEntry(),
egressKey(42, 0, 0, 0): denyEntry(),
}),
adds: Keys{},
deletes: Keys{},
}, {
name: "test-5 - Adding L3-deny to an existing allow-all",
setup: testMapState(mapStateMap{
AnyIngressKey(): allowEntry(),
}),
args: []args{
{cs: csFoo, adds: []int{41}, deletes: []int{}, port: 0, proto: 0, ingress: true, redirect: false, deny: true},
},
state: testMapState(mapStateMap{
AnyIngressKey(): allowEntry(),
ingressL3OnlyKey(41): denyEntry(),
}),
adds: Keys{
ingressL3OnlyKey(41): {},
},
deletes: Keys{},
}, {
name: "test-6 - Multiple entries",
setup: testMapState(mapStateMap{
AnyEgressKey(): allowEntry(),
HttpEgressKey(0): proxyEntry(12345),
DNSUDPEgressKey(0): proxyEntry(12346),
}),
args: []args{
{cs: csFoo, adds: []int{41}, deletes: []int{}, port: 0, proto: 0, ingress: false, redirect: false, deny: true},
},
state: testMapState(mapStateMap{
AnyEgressKey(): allowEntry(),
egressKey(41, 0, 0, 0): denyEntry(),
HttpEgressKey(0): proxyEntry(12345),
DNSUDPEgressKey(0): proxyEntry(12346),
}),
adds: Keys{
egressKey(41, 0, 0, 0): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-n - title",
args: []args{
//{cs: csFoo, adds: []int{42, 43}, deletes: []int{50}, port: 80, proto: 6, ingress: true, redirect: false, deny: false},
},
state: emptyMapState(),
adds: Keys{
//HttpIngressKey(42): allowEntry(),
},
deletes: Keys{
//HttpIngressKey(43): allowEntry(),
},
},
}
epPolicy := &EndpointPolicy{
selectorPolicy: &selectorPolicy{
SelectorCache: selectorCache,
},
PolicyOwner: DummyOwner{},
}
policyMapState := emptyMapState()
for _, tt := range tests {
policyMaps := MapChanges{}
if !tt.continued {
if tt.setup.Valid() {
policyMapState = tt.setup
} else {
policyMapState = testMapState(nil)
}
}
epPolicy.policyMapState = policyMapState
for _, x := range tt.args {
dir := trafficdirection.Egress
if x.ingress {
dir = trafficdirection.Ingress
}
adds := x.cs.addSelections(x.adds...)
deletes := x.cs.deleteSelections(x.deletes...)
key := KeyForDirection(dir).WithPortProto(x.proto, x.port)
var proxyPort uint16
if x.redirect {
proxyPort = 1
}
value := newMapStateEntry(NilRuleOrigin, proxyPort, 0, x.deny, NoAuthRequirement)
policyMaps.AccumulateMapChanges(adds, deletes, []Key{key}, value)
}
policyMaps.SyncMapChanges(versioned.LatestTx)
handle, changes := policyMaps.consumeMapChanges(epPolicy, denyRules)
if handle != nil {
handle.Close()
}
policyMapState.validatePortProto(t)
require.True(t, policyMapState.Equal(&tt.state), "%s (MapState):\n%s", tt.name, policyMapState.diff(&tt.state))
require.EqualValues(t, tt.adds, changes.Adds, tt.name+" (adds)")
require.EqualValues(t, tt.deletes, changes.Deletes, tt.name+" (deletes)")
}
}
func TestMapState_AccumulateMapChanges(t *testing.T) {
csFoo := newTestCachedSelector("Foo", false)
csBar := newTestCachedSelector("Bar", false)
csWildcard := newTestCachedSelector("wildcard", true)
identityCache := identity.IdentityMap{
identity.NumericIdentity(identityFoo): labelsFoo,
}
selectorCache := testNewSelectorCache(identityCache)
type args struct {
cs *testCachedSelector
adds []int
deletes []int
port uint16
proto u8proto.U8proto
ingress bool
redirect bool
deny bool
authReq AuthRequirement
}
tests := []struct {
continued bool // Start from the end state of the previous test
name string
args []args // changes applied, in order
state mapState
adds Keys
deletes Keys
}{{
name: "test-2a - Adding 2 identities, and deleting a nonexisting key on an empty state",
args: []args{
{cs: csFoo, adds: []int{42, 43}, deletes: []int{50}, port: 80, proto: 6, ingress: true, redirect: false, deny: false},
},
state: testMapState(mapStateMap{
HttpIngressKey(42): allowEntry(),
HttpIngressKey(43): allowEntry(),
}),
adds: Keys{
HttpIngressKey(42): {},
HttpIngressKey(43): {},
},
deletes: Keys{},
}, {
continued: true,
name: "test-2b - Adding Bar also selecting 42",
args: []args{
{cs: csBar, adds: []int{42, 44}, deletes: []int{}, port: 80, proto: 6, ingress: true, redirect: false, deny: false},
},
state: testMapState(mapStateMap{
HttpIngressKey(42): allowEntry(),
HttpIngressKey(43): allowEntry(),
HttpIngressKey(44): allowEntry(),
}),
adds: Keys{
HttpIngressKey(44): {},
},
deletes: Keys{},
}, {
continued: true,
name: "test-2c - Deleting 42",
args: []args{
{cs: csFoo, adds: []int{}, deletes: []int{42}, port: 80, proto: 6, ingress: true, redirect: false, deny: false},
{cs: csBar, adds: []int{}, deletes: []int{42}, port: 80, proto: 6, ingress: true, redirect: false, deny: false},
},
state: testMapState(mapStateMap{
HttpIngressKey(43): allowEntry(),
HttpIngressKey(44): allowEntry(),
}),
adds: Keys{},
deletes: Keys{
HttpIngressKey(42): {},
},
}, {
continued: true,
name: "test-2f - Adding an entry that already exists, no adds",
args: []args{
{cs: csBar, adds: []int{44}, deletes: []int{}, port: 80, proto: 6, ingress: true, redirect: false, deny: false},
},
state: testMapState(mapStateMap{
HttpIngressKey(43): allowEntry(),
HttpIngressKey(44): allowEntry(),
}),
adds: Keys{},
deletes: Keys{},
}, {
continued: false,
name: "test-3a - egress HTTP proxy (setup)",
args: []args{
{cs: nil, adds: []int{0}, deletes: []int{}, port: 0, proto: 0, ingress: true, redirect: false, deny: false},
{cs: nil, adds: []int{1}, deletes: []int{}, port: 0, proto: 0, ingress: true, redirect: false, deny: false},
{cs: csBar, adds: []int{42}, deletes: []int{}, port: 53, proto: 17, ingress: false, redirect: false, deny: false},
{cs: csBar, adds: []int{42}, deletes: []int{}, port: 53, proto: 6, ingress: false, redirect: false, deny: false},
},
state: testMapState(mapStateMap{
AnyIngressKey(): allowEntry(),
HostIngressKey(): allowEntry(),
DNSUDPEgressKey(42): allowEntry(),
DNSTCPEgressKey(42): allowEntry(),
}),
adds: Keys{
AnyIngressKey(): {},
HostIngressKey(): {},
DNSUDPEgressKey(42): {},
DNSTCPEgressKey(42): {},
},
deletes: Keys{},
}, {
continued: true,
name: "test-3b - egress HTTP proxy (incremental update)",
args: []args{
{cs: csFoo, adds: []int{43}, deletes: []int{}, port: 80, proto: 6, ingress: false, redirect: true, deny: false},
},
state: testMapState(mapStateMap{
AnyIngressKey(): allowEntry(),
HostIngressKey(): allowEntry(),
DNSUDPEgressKey(42): allowEntry(),
DNSTCPEgressKey(42): allowEntry(),
HttpEgressKey(43): proxyEntry(1),
}),
adds: Keys{
HttpEgressKey(43): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-4a - Add & delete; delete cancels the add in reply",
args: []args{
{cs: csFoo, adds: []int{44}, deletes: []int{}, port: 80, proto: 6, ingress: false, redirect: true, deny: false},
{cs: csFoo, adds: []int{}, deletes: []int{44}, port: 80, proto: 6, ingress: false, redirect: true, deny: false},
},
state: emptyMapState(),
adds: Keys{},
deletes: Keys{
// The delete of the key is recorded, as the key may have already existed in the (bpf) map
HttpEgressKey(44): {},
},
}, {
continued: true,
name: "test-4b - Add, delete, & add; delete suppressed",
args: []args{
{cs: csFoo, adds: []int{44}, deletes: []int{}, port: 80, proto: 6, ingress: false, redirect: true, deny: false},
{cs: csFoo, adds: []int{}, deletes: []int{44}, port: 80, proto: 6, ingress: false, redirect: true, deny: false},
{cs: csFoo, adds: []int{44}, deletes: []int{}, port: 80, proto: 6, ingress: false, redirect: true, deny: false},
},
state: testMapState(mapStateMap{
HttpEgressKey(44): proxyEntry(1),
}),
adds: Keys{
HttpEgressKey(44): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-5a - auth type propagation from the most specific covering key",
args: []args{
{cs: csFoo, adds: []int{43}, authReq: AuthTypeAlwaysFail.AsExplicitRequirement()},
{cs: csFoo, adds: []int{0}, proto: 6, authReq: AuthTypeSpire.AsExplicitRequirement()},
{cs: csBar, adds: []int{43}, port: 80, proto: 6, redirect: true},
},
state: testMapState(mapStateMap{
egressKey(43, 0, 0, 0): allowEntry().withExplicitAuth(AuthTypeAlwaysFail),
egressKey(0, 6, 0, 0): allowEntry().withExplicitAuth(AuthTypeSpire),
egressKey(43, 6, 80, 0): proxyEntry(1).withDerivedAuth(AuthTypeAlwaysFail),
}),
adds: Keys{
egressKey(43, 0, 0, 0): {},
egressKey(0, 6, 0, 0): {},
egressKey(43, 6, 80, 0): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-5b - auth type propagation from the most specific covering key - reverse",
args: []args{
{cs: csBar, adds: []int{43}, port: 80, proto: 6, redirect: true},
{cs: csFoo, adds: []int{0}, proto: 6, authReq: AuthTypeSpire.AsExplicitRequirement()},
{cs: csFoo, adds: []int{43}, authReq: AuthTypeAlwaysFail.AsExplicitRequirement()},
},
state: testMapState(mapStateMap{
egressKey(43, 0, 0, 0): allowEntry().withExplicitAuth(AuthTypeAlwaysFail),
egressKey(0, 6, 0, 0): allowEntry().withExplicitAuth(AuthTypeSpire),
egressKey(43, 6, 80, 0): proxyEntry(1).withDerivedAuth(AuthTypeAlwaysFail),
}),
adds: Keys{
egressKey(43, 0, 0, 0): {},
egressKey(0, 6, 0, 0): {},
egressKey(43, 6, 80, 0): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-6a - L3-only explicit auth type and L4-only without",
args: []args{
{cs: csFoo, adds: []int{43}, authReq: AuthTypeSpire.AsExplicitRequirement()},
{cs: csWildcard, adds: []int{0}, port: 80, proto: 6, redirect: true},
},
state: testMapState(mapStateMap{
egressKey(43, 0, 0, 0): allowEntry().withExplicitAuth(AuthTypeSpire),
egressKey(0, 6, 80, 0): proxyEntry(1),
}),
adds: Keys{
egressKey(43, 0, 0, 0): {},
egressKey(0, 6, 80, 0): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-6b - L3-only explicit auth type and L4-only without - reverse",
args: []args{
{cs: csWildcard, adds: []int{0}, port: 80, proto: 6, redirect: true},
{cs: csFoo, adds: []int{43}, authReq: AuthTypeSpire.AsExplicitRequirement()},
},
state: testMapState(mapStateMap{
egressKey(43, 0, 0, 0): allowEntry().withExplicitAuth(AuthTypeSpire),
egressKey(0, 6, 80, 0): proxyEntry(1),
}),
adds: Keys{
egressKey(43, 0, 0, 0): {},
egressKey(0, 6, 80, 0): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-7a - L3/proto explicit auth type and L4-only without",
args: []args{
{cs: csFoo, adds: []int{43}, proto: 6, authReq: AuthTypeSpire.AsExplicitRequirement()},
{cs: csWildcard, adds: []int{0}, port: 80, proto: 6, redirect: true},
},
state: testMapState(mapStateMap{
egressKey(43, 6, 0, 0): allowEntry().withExplicitAuth(AuthTypeSpire),
egressKey(0, 6, 80, 0): proxyEntry(1),
}),
adds: Keys{
egressKey(43, 6, 0, 0): {},
egressKey(0, 6, 80, 0): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-7b - L3/proto explicit auth type and L4-only without - reverse",
args: []args{
{cs: csWildcard, adds: []int{0}, port: 80, proto: 6, redirect: true},
{cs: csFoo, adds: []int{43}, proto: 6, authReq: AuthTypeSpire.AsExplicitRequirement()},
},
state: testMapState(mapStateMap{
egressKey(43, 6, 0, 0): allowEntry().withExplicitAuth(AuthTypeSpire),
egressKey(0, 6, 80, 0): proxyEntry(1),
}),
adds: Keys{
egressKey(43, 6, 0, 0): {},
egressKey(0, 6, 80, 0): {},
},
deletes: Keys{},
}, {
continued: false,
name: "test-n - title",
args: []args{
//{cs: csFoo, adds: []int{42, 43}, deletes: []int{50}, port: 80, proto: 6, ingress: true, redirect: false, deny: false},
},
state: emptyMapState(),
adds: Keys{
//HttpIngressKey(42): allowEntry(),
},
deletes: Keys{
//HttpIngressKey(43): allowEntry(),
},
},
}
epPolicy := &EndpointPolicy{
selectorPolicy: &selectorPolicy{
SelectorCache: selectorCache,
},
PolicyOwner: DummyOwner{},
}
policyMapState := emptyMapState()
for _, tt := range tests {
t.Log(tt.name)
policyMaps := MapChanges{}
if !tt.continued {
policyMapState = emptyMapState()
}
epPolicy.policyMapState = policyMapState
for _, x := range tt.args {
dir := trafficdirection.Egress
if x.ingress {
dir = trafficdirection.Ingress
}
adds := x.cs.addSelections(x.adds...)
deletes := x.cs.deleteSelections(x.deletes...)
key := KeyForDirection(dir).WithPortProto(x.proto, x.port)
var proxyPort uint16
if x.redirect {
proxyPort = 1
}
value := newMapStateEntry(NilRuleOrigin, proxyPort, 0, x.deny, x.authReq)
policyMaps.AccumulateMapChanges(adds, deletes, []Key{key}, value)
}
policyMaps.SyncMapChanges(versioned.LatestTx)
handle, changes := policyMaps.consumeMapChanges(epPolicy, authRules|denyRules)
if handle != nil {
handle.Close()
}
policyMapState.validatePortProto(t)
require.True(t, policyMapState.Equal(&tt.state), "%s (MapState):\n%s", tt.name, policyMapState.diff(&tt.state))
require.EqualValues(t, tt.adds, changes.Adds, tt.name+" (adds)")
require.EqualValues(t, tt.deletes, changes.Deletes, tt.name+" (deletes)")
}
}
func TestMapState_denyPreferredInsertWithSubnets(t *testing.T) {
// Mock the identities that would be selected by the world, IP, and subnet selectors
// Selections for the label selector 'reserved:world'
reservedWorldSelections := identity.NumericIdentitySlice{identity.ReservedIdentityWorld, worldIPIdentity, worldSubnetIdentity}
// Selections for the CIDR selector 'cidr:192.0.2.3/32'
worldIPSelections := identity.NumericIdentitySlice{worldIPIdentity}
// Selections for the CIDR selector 'cidr:192.0.2.0/24'
worldSubnetSelections := identity.NumericIdentitySlice{worldSubnetIdentity, worldIPIdentity}
type action uint32
const (
noAction = action(iota)
insertAllowAll = action(1 << iota)
insertA
insertB
worldIPl3only // Do not expect L4 keys for IP covered by a subnet
worldIPProtoOnly // Do not expect port keys for IP covered by a subnet
worldSubnetl3only // Do not expect L4 keys for IP subnet
worldSubnetProtoOnly // Do not expect port keys for IP subnet
insertDenyWorld
insertDenyWorldTCP
insertDenyWorldHTTP
insertAL3NotInB
insertBL3NotInA
insertBoth = insertA | insertB
)
type withAllowAll bool
const (
WithAllowAll = withAllowAll(true)
WithoutAllowAll = withAllowAll(false)
)
// these tests are based on the sheet https://docs.google.com/spreadsheets/d/1WANIoZGB48nryylQjjOw6lKjI80eVgPShrdMTMalLEw#gid=2109052536
tests := []struct {
name string
withAllowAll withAllowAll
aIdentities identity.NumericIdentitySlice
bIdentities identity.NumericIdentitySlice
aIsDeny, bIsDeny bool
aPort uint16
aProto u8proto.U8proto
bPort uint16
bProto u8proto.U8proto
outcome action
}{
// deny-allow insertions
{"deny-allow: a superset a|b L3-only; subset allow inserted as deny", WithAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 0, 0, 0, insertAllowAll | insertA},
{"deny-allow: a superset a|b L3-only; without allow-all", WithoutAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 0, 0, 0, insertA},
{"deny-allow: b superset a|b L3-only", WithAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 0, 0, 0, insertAllowAll | insertBoth},
{"deny-allow: b superset a|b L3-only; without allow-all", WithoutAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 0, 0, 0, insertBoth},
{"deny-allow: a superset a L3-only, b L4; subset allow inserted as deny", WithAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 0, 0, 6, insertAllowAll | insertA},
{"deny-allow: a superset a L3-only, b L4; without allow-all, subset allow inserted as deny", WithoutAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 0, 0, 6, insertA},
{"deny-allow: b superset a L3-only, b L4", WithAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 0, 0, 6, insertAllowAll | insertBoth | worldIPl3only},
{"deny-allow: b superset a L3-only, b L4; without allow-all, added deny TCP due to intersecting deny", WithoutAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 0, 0, 6, insertBoth | worldIPl3only},
{"deny-allow: a superset a L3-only, b L3L4; subset allow inserted as deny", WithAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 0, 80, 6, insertAllowAll | insertA},
{"deny-allow: a superset a L3-only, b L3L4; without allow-all, subset allow inserted as deny", WithoutAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 0, 80, 6, insertA},
{"deny-allow: b superset a L3-only, b L3L4; IP allow not inserted", WithAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 0, 80, 6, insertAllowAll | insertBoth | worldIPl3only},
{"deny-allow: b superset a L3-only, b L3L4; without allow-all, IP allow not inserted", WithoutAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 0, 80, 6, insertBoth | worldIPl3only},
{"deny-allow: a superset a L4, b L3-only", WithAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 6, 0, 0, insertAllowAll | insertBoth},
{"deny-allow: a superset a L4, b L3-only; without allow-all", WithoutAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 6, 0, 0, insertBoth},
{"deny-allow: b superset a L4, b L3-only", WithAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 6, 0, 0, insertAllowAll | insertBoth},
{"deny-allow: b superset a L4, b L3-only; without allow-all, more specific deny added", WithoutAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 6, 0, 0, insertBoth},
{"deny-allow: a superset a L4, b L4; subset allow inserted as deny", WithAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 6, 0, 6, insertAllowAll | insertA},
{"deny-allow: a superset a L4, b L4; without allow-all, subset allow inserted as deny", WithoutAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 6, 0, 6, insertA},
{"deny-allow: b superset a L4, b L4", WithAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 6, 0, 6, insertAllowAll | insertBoth},
{"deny-allow: b superset a L4, b L4; without allow-all", WithoutAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 6, 0, 6, insertBoth},
{"deny-allow: a superset a L4, b L3L4; subset allow not inserted", WithAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 6, 80, 6, insertAllowAll | insertA},
{"deny-allow: a superset a L4, b L3L4; without allow-all, subset allow not inserted", WithoutAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 0, 6, 80, 6, insertA},
{"deny-allow: b superset a L4, b L3L4", WithAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 6, 80, 6, insertAllowAll | insertBoth | worldIPProtoOnly},
{"deny-allow: b superset a L4, b L3L4; without allow-all", WithoutAllowAll, worldIPSelections, worldSubnetSelections, true, false, 0, 6, 80, 6, insertBoth | worldIPProtoOnly},
{"deny-allow: a superset a L3L4, b L3-only", WithAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 80, 6, 0, 0, insertAllowAll | insertBoth},
{"deny-allow: a superset a L3L4, b L3-only; without allow-all", WithoutAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 80, 6, 0, 0, insertBoth},
{"deny-allow: b superset a L3L4, b L3-only", WithAllowAll, worldIPSelections, worldSubnetSelections, true, false, 80, 6, 0, 0, insertAllowAll | insertBoth},
{"deny-allow: b superset a L3L4, b L3-only; without allow-all", WithoutAllowAll, worldIPSelections, worldSubnetSelections, true, false, 80, 6, 0, 0, insertBoth},
{"deny-allow: a superset a L3L4, b L4", WithAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 80, 6, 0, 6, insertAllowAll | insertBoth},
{"deny-allow: a superset a L3L4, b L4; without allow-all", WithoutAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 80, 6, 0, 6, insertBoth},
{"deny-allow: b superset a L3L4, b L4", WithAllowAll, worldIPSelections, worldSubnetSelections, true, false, 80, 6, 0, 6, insertAllowAll | insertBoth},
{"deny-allow: b superset a L3L4, b L4; without allow-all", WithoutAllowAll, worldIPSelections, worldSubnetSelections, true, false, 80, 6, 0, 6, insertBoth},
{"deny-allow: a superset a L3L4, b L3L4", WithAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 80, 6, 80, 6, insertAllowAll | insertA},
{"deny-allow: a superset a L3L4, b L3L4; without allow-all", WithoutAllowAll, reservedWorldSelections, worldSubnetSelections, true, false, 80, 6, 80, 6, insertA},
{"deny-allow: b superset a L3L4, b L3L4", WithAllowAll, worldIPSelections, worldSubnetSelections, true, false, 80, 6, 80, 6, insertAllowAll | insertBoth},
{"deny-allow: b superset a L3L4, b L3L4; without allow-all", WithoutAllowAll, worldIPSelections, worldSubnetSelections, true, false, 80, 6, 80, 6, insertBoth},
// deny-deny insertions: Note: There is no redundancy between different non-zero security IDs on the
// datapath, even if one is a CIDR subset of another. The situation would be different if we could
// completely remove (or not add in the first place) the redundant ID from the ipcache so that the
// datapath could never assign that ID to a packet for policy enforcement.
// These test cases are left here for such a future improvement.
{"deny-deny: a superset a|b L3-only", WithAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 0, 0, 0, insertAllowAll | insertBoth},
{"deny-deny: a superset a|b L3-only; without allow-all", WithoutAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 0, 0, 0, insertBoth},
{"deny-deny: b superset a|b L3-only", WithAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 0, 0, 0, insertAllowAll | insertBoth},
{"deny-deny: b superset a|b L3-only; without allow-all", WithoutAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 0, 0, 0, insertBoth},
{"deny-deny: a superset a L3-only, b L4", WithAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 0, 0, 6, insertAllowAll | insertA},
{"deny-deny: a superset a L3-only, b L4; without allow-all", WithoutAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 0, 0, 6, insertA},
{"deny-deny: b superset a L3-only, b L4", WithAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 0, 0, 6, insertAllowAll | insertBoth | insertBL3NotInA},
{"deny-deny: b superset a L3-only, b L4; without allow-all", WithoutAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 0, 0, 6, insertBoth | insertBL3NotInA},
{"deny-deny: a superset a L3-only, b L3L4", WithAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 0, 80, 6, insertAllowAll | insertA},
{"deny-deny: a superset a L3-only, b L3L4; without allow-all", WithoutAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 0, 80, 6, insertA},
{"deny-deny: b superset a L3-only, b L3L4", WithAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 0, 80, 6, insertAllowAll | insertBoth | insertBL3NotInA},
{"deny-deny: b superset a L3-only, b L3L4; without allow-all", WithoutAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 0, 80, 6, insertBoth | insertBL3NotInA},
{"deny-deny: a superset a L4, b L3-only", WithAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 6, 0, 0, insertAllowAll | insertBoth | insertAL3NotInB},
{"deny-deny: a superset a L4, b L3-only; without allow-all", WithoutAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 6, 0, 0, insertBoth | insertAL3NotInB},
{"deny-deny: b superset a L4, b L3-only", WithAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 6, 0, 0, insertAllowAll | insertB},
{"deny-deny: b superset a L4, b L3-only; without allow-all", WithoutAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 6, 0, 0, insertB},
{"deny-deny: a superset a L4, b L4", WithAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 6, 0, 6, insertAllowAll | insertBoth},
{"deny-deny: a superset a L4, b L4; without allow-all", WithoutAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 6, 0, 6, insertBoth},
{"deny-deny: b superset a L4, b L4", WithAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 6, 0, 6, insertAllowAll | insertBoth},
{"deny-deny: b superset a L4, b L4; without allow-all", WithoutAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 6, 0, 6, insertBoth},
{"deny-deny: a superset a L4, b L3L4", WithAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 6, 80, 6, insertAllowAll | insertA},
{"deny-deny: a superset a L4, b L3L4; without allow-all", WithoutAllowAll, worldSubnetSelections, worldIPSelections, true, true, 0, 6, 80, 6, insertA},
{"deny-deny: b superset a L4, b L3L4", WithAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 6, 80, 6, insertAllowAll | insertBoth | insertBL3NotInA},
{"deny-deny: b superset a L4, b L3L4; without allow-all", WithoutAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 0, 6, 80, 6, insertBoth | insertBL3NotInA},
{"deny-deny: a superset a L3L4, b L3-only", WithAllowAll, worldSubnetSelections, worldIPSelections, true, true, 80, 6, 0, 0, insertAllowAll | insertBoth | insertAL3NotInB},
{"deny-deny: a superset a L3L4, b L3-only; without allow-all", WithoutAllowAll, worldSubnetSelections, worldIPSelections, true, true, 80, 6, 0, 0, insertBoth | insertAL3NotInB},
{"deny-deny: b superset a L3L4, b L3-only", WithAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 80, 6, 0, 0, insertAllowAll | insertB},
{"deny-deny: b superset a L3L4, b L3-only; without allow-all", WithoutAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 80, 6, 0, 0, insertB},
{"deny-deny: a superset a L3L4, b L4", WithAllowAll, worldSubnetSelections, worldIPSelections, true, true, 80, 6, 0, 6, insertAllowAll | insertBoth | insertAL3NotInB},
{"deny-deny: a superset a L3L4, b L4; without allow-all", WithoutAllowAll, worldSubnetSelections, worldIPSelections, true, true, 80, 6, 0, 6, insertBoth | insertAL3NotInB},
{"deny-deny: b superset a L3L4, b L4", WithAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 80, 6, 0, 6, insertAllowAll | insertB},
{"deny-deny: b superset a L3L4, b L4; without allow-all", WithoutAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 80, 6, 0, 6, insertB},
{"deny-deny: a superset a L3L4, b L3L4", WithAllowAll, worldSubnetSelections, worldIPSelections, true, true, 80, 6, 80, 6, insertAllowAll | insertBoth},
{"deny-deny: a superset a L3L4, b L3L4; without allow-all", WithoutAllowAll, worldSubnetSelections, worldIPSelections, true, true, 80, 6, 80, 6, insertBoth},
{"deny-deny: b superset a L3L4, b L3L4", WithAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 80, 6, 80, 6, insertAllowAll | insertBoth},
{"deny-deny: b superset a L3L4, b L3L4; without allow-all", WithoutAllowAll, worldSubnetSelections, reservedWorldSelections, true, true, 80, 6, 80, 6, insertBoth},
// allow-allow insertions do not need tests as their effect on one another does not matter.
}
for _, tt := range tests {
anyIngressKey := IngressKey()
allowEntry := allowEntry()
var aKeys []Key
for _, idA := range tt.aIdentities {
if tt.outcome&worldIPl3only > 0 && idA == worldIPIdentity &&
(tt.aProto != 0 || tt.aPort != 0) {
continue
}
if tt.outcome&worldIPProtoOnly > 0 && idA == worldIPIdentity &&
tt.aPort != 0 {
continue
}
if tt.outcome&worldSubnetl3only > 0 && idA == worldSubnetIdentity &&
(tt.aProto != 0 || tt.aPort != 0) {
continue
}
if tt.outcome&worldSubnetProtoOnly > 0 && idA == worldSubnetIdentity &&
tt.aPort != 0 {
continue
}
aKeys = append(aKeys, IngressKey().WithIdentity(idA).WithPortProto(tt.aProto, tt.aPort))
}
aEntry := NewMapStateEntry(types.NewMapStateEntry(tt.aIsDeny, 0, 0, types.NoAuthRequirement))
var bKeys []Key
for _, idB := range tt.bIdentities {
if tt.outcome&worldIPl3only > 0 && idB == worldIPIdentity &&
(tt.bProto != 0 || tt.bPort != 0) {
continue
}
if tt.outcome&worldIPProtoOnly > 0 && idB == worldIPIdentity &&
tt.bPort != 0 {
continue
}
if tt.outcome&worldSubnetl3only > 0 && idB == worldSubnetIdentity &&
(tt.bProto != 0 || tt.bPort != 0) {
continue
}
if tt.outcome&worldSubnetProtoOnly > 0 && idB == worldSubnetIdentity &&
tt.bPort != 0 {
continue
}
bKeys = append(bKeys, IngressKey().WithIdentity(idB).WithPortProto(tt.bProto, tt.bPort))
}
bEntry := NewMapStateEntry(types.NewMapStateEntry(tt.bIsDeny, 0, 0, types.NoAuthRequirement))
expectedKeys := emptyMapState()
if tt.outcome&insertAllowAll > 0 {
expectedKeys.insert(anyIngressKey, allowEntry)
}
// insert allow expectations before deny expectations to manage overlap
if tt.outcome&insertB > 0 {
BLoop1:
for _, bKey := range bKeys {
if tt.outcome&insertBL3NotInA > 0 {
for _, aKey := range aKeys {
if bKey.Identity == aKey.Identity {
continue BLoop1
}
}
}
expectedKeys.insert(bKey, bEntry)
}
}
if tt.outcome&insertA > 0 {
ALoop:
for _, aKey := range aKeys {
if tt.outcome&insertAL3NotInB > 0 {
for _, bKey := range bKeys {
if aKey.Identity == bKey.Identity {
continue ALoop
}
}
}
expectedKeys.insert(aKey, aEntry)
}
}
if tt.outcome&insertDenyWorld > 0 {
worldIngressKey := IngressKey().WithIdentity(2)
denyEntry := NewMapStateEntry(DenyEntry)
expectedKeys.insert(worldIngressKey, denyEntry)
}
if tt.outcome&insertDenyWorldTCP > 0 {
worldIngressKey := IngressKey().WithIdentity(2).WithTCPPort(0)
denyEntry := NewMapStateEntry(DenyEntry)
expectedKeys.insert(worldIngressKey, denyEntry)
}
if tt.outcome&insertDenyWorldHTTP > 0 {
worldIngressKey := IngressKey().WithIdentity(2).WithTCPPort(80)
denyEntry := NewMapStateEntry(DenyEntry)
expectedKeys.insert(worldIngressKey, denyEntry)
}
outcomeKeys := emptyMapState()
changes := ChangeState{}
if tt.withAllowAll {
outcomeKeys.insertWithChanges(anyIngressKey, allowEntry, allFeatures, changes)
}
for _, idA := range tt.aIdentities {
aKey := IngressKey().WithIdentity(idA).WithPortProto(tt.aProto, tt.aPort)
outcomeKeys.insertWithChanges(aKey, aEntry, allFeatures, changes)
}
for _, idB := range tt.bIdentities {
bKey := IngressKey().WithIdentity(idB).WithPortProto(tt.bProto, tt.bPort)
outcomeKeys.insertWithChanges(bKey, bEntry, allFeatures, changes)
}
outcomeKeys.validatePortProto(t)
require.True(t, expectedKeys.Equal(&outcomeKeys), "%s (MapState):\n%s\nExpected:\n%s\nObtained:\n%s\n", tt.name, outcomeKeys.diff(&expectedKeys), expectedKeys, outcomeKeys)
// Test also with reverse insertion order
outcomeKeys = emptyMapState()
for _, idB := range tt.bIdentities {
bKey := IngressKey().WithIdentity(idB).WithPortProto(tt.bProto, tt.bPort)
outcomeKeys.insertWithChanges(bKey, bEntry, allFeatures, changes)
}
for _, idA := range tt.aIdentities {
aKey := IngressKey().WithIdentity(idA).WithPortProto(tt.aProto, tt.aPort)
outcomeKeys.insertWithChanges(aKey, aEntry, allFeatures, changes)
}
if tt.withAllowAll {
outcomeKeys.insertWithChanges(anyIngressKey, allowEntry, allFeatures, changes)
}
outcomeKeys.validatePortProto(t)
require.True(t, expectedKeys.Equal(&outcomeKeys), "%s (in reverse) (MapState):\n%s\nExpected:\n%s\nObtained:\n%s\n", tt.name, outcomeKeys.diff(&expectedKeys), expectedKeys, outcomeKeys)
}
// Now test all cases with different traffic directions.
// This should result in both entries being inserted with
// no changes, as they do not affect one another anymore.
for _, tt := range tests {
anyIngressKey := IngressKey()
anyEgressKey := EgressKey()
allowEntry := allowEntry()
var aKeys []Key
for _, idA := range tt.aIdentities {
aKeys = append(aKeys, IngressKey().WithIdentity(idA).WithPortProto(tt.aProto, tt.aPort))
}
aEntry := NewMapStateEntry(types.NewMapStateEntry(tt.aIsDeny, 0, 0, types.NoAuthRequirement))
var bKeys []Key
for _, idB := range tt.bIdentities {
bKeys = append(bKeys, EgressKey().WithIdentity(idB).WithPortProto(tt.bProto, tt.bPort))
}
bEntry := NewMapStateEntry(types.NewMapStateEntry(tt.bIsDeny, 0, 0, types.NoAuthRequirement))
expectedKeys := emptyMapState()
if tt.outcome&insertAllowAll > 0 {
expectedKeys.insert(anyIngressKey, allowEntry)
expectedKeys.insert(anyEgressKey, allowEntry)
}
for _, aKey := range aKeys {
expectedKeys.insert(aKey, aEntry)
}
for _, bKey := range bKeys {
expectedKeys.insert(bKey, bEntry)
}
outcomeKeys := emptyMapState()
changes := ChangeState{}
if tt.withAllowAll {
outcomeKeys.insertWithChanges(anyIngressKey, allowEntry, allFeatures, changes)
outcomeKeys.insertWithChanges(anyEgressKey, allowEntry, allFeatures, changes)
}
for _, aKey := range aKeys {
outcomeKeys.insertWithChanges(aKey, aEntry, allFeatures, changes)
}
for _, bKey := range bKeys {
outcomeKeys.insertWithChanges(bKey, bEntry, allFeatures, changes)
}
outcomeKeys.validatePortProto(t)
require.True(t, expectedKeys.Equal(&outcomeKeys), "%s different traffic directions (MapState):\n%s", tt.name, outcomeKeys.diff(&expectedKeys))
// Test also with reverse insertion order
outcomeKeys = emptyMapState()
for _, bKey := range bKeys {
outcomeKeys.insertWithChanges(bKey, bEntry, allFeatures, changes)
}
for _, aKey := range aKeys {
outcomeKeys.insertWithChanges(aKey, aEntry, allFeatures, changes)
}
if tt.withAllowAll {
outcomeKeys.insertWithChanges(anyEgressKey, allowEntry, allFeatures, changes)
outcomeKeys.insertWithChanges(anyIngressKey, allowEntry, allFeatures, changes)
}
outcomeKeys.validatePortProto(t)
require.True(t, expectedKeys.Equal(&outcomeKeys), "%s different traffic directions (in reverse) (MapState):\n%s", tt.name, outcomeKeys.diff(&expectedKeys))
}
}
func TestMapState_Get_stacktrace(t *testing.T) {
ms := emptyMapState()
// This should produce a stacktrace in the error log. It is not validated here but can be
// observed manually.
// Example log (with newlines expanded):
// time="2024-06-22T23:21:27+03:00" level=error msg="mapState.Get: invalid wildcard port with non-zero mask: Identity=0,DestPort=0,Nexthdr=0,TrafficDirection=0. Stacktrace:
// github.com/hashicorp/go-hclog.Stacktrace
// github.com/cilium/cilium/vendor/github.com/hashicorp/go-hclog/stacktrace.go:51
// github.com/cilium/cilium/pkg/policy.(*mapState).Get
// github.com/cilium/cilium/pkg/policy/mapstate.go:355
// github.com/cilium/cilium/pkg/policy.TestMapState_Get_stacktrace
// github.com/cilium/cilium/pkg/policy/mapstate_test.go:3699
// testing.tRunner
// go/src/testing/testing.go:1689" subsys=policy
log.Error("Expecting an error log on the next log line!")
_, ok := ms.Get(Key{})
assert.False(t, ok)
}
// TestDenyPreferredInsertLogic is now less valuable since we do not have the mapstate
// validator any more, but may still catch bugs.
func TestDenyPreferredInsertLogic(t *testing.T) {
td := newTestData()
td.bootstrapRepo(GenerateCIDRDenyRules, 1000, t)
p, _ := td.repo.resolvePolicyLocked(fooIdentity)
epPolicy := p.DistillPolicy(DummyOwner{}, nil)
epPolicy.Ready()
n := epPolicy.policyMapState.Len()
p.Detach()
assert.Positive(t, n)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"github.com/cilium/cilium/pkg/metrics"
"github.com/prometheus/client_golang/prometheus"
)
const (
// LabelSelectorClass indicates the class of selector being measured
LabelSelectorClass = "class"
// LabelValueSCFQDN is used for regular security identities
// shared between all nodes in the cluster.
LabelValueSCFQDN = "fqdn"
// LabelValueSCCluster is used for the cluster entity.
LabelValueSCCluster = "cluster"
// LabelValueSCWorld is used for the world entity.
LabelValueSCWorld = "world"
// LabelValueSCOther is used for security identities allocated locally
// on the current node.
LabelValueSCOther = "other"
)
var (
selectorCacheMetricsDesc = prometheus.NewDesc(
prometheus.BuildFQName(metrics.CiliumAgentNamespace, "policy_selector", "match_count_max"),
"The maximum number of identities selected by a network policy peer selector",
[]string{LabelSelectorClass},
nil,
)
)
type selectorStats struct {
maxCardinalityByClass map[string]int
}
func newSelectorStats() selectorStats {
return selectorStats{
maxCardinalityByClass: map[string]int{
LabelValueSCFQDN: 0,
LabelValueSCCluster: 0,
LabelValueSCWorld: 0,
LabelValueSCOther: 0,
},
}
}
type selectorStatsCollector interface {
Stats() selectorStats
}
type selectorCacheMetrics struct {
prometheus.Collector
selectorStatsCollector
}
func newSelectorCacheMetrics(sc selectorStatsCollector) prometheus.Collector {
return &selectorCacheMetrics{selectorStatsCollector: sc}
}
func (scm *selectorCacheMetrics) Describe(ch chan<- *prometheus.Desc) {
ch <- selectorCacheMetricsDesc
}
func (scm *selectorCacheMetrics) Collect(ch chan<- prometheus.Metric) {
stats := scm.selectorStatsCollector.Stats()
for class, stat := range stats.maxCardinalityByClass {
ch <- prometheus.MustNewConstMetric(
selectorCacheMetricsDesc, prometheus.GaugeValue, float64(stat), class,
)
}
}
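// exampleRegisterSelectorCacheMetrics is an illustrative sketch and not part
// of the original file: it shows how the collector above could plausibly be
// wired into a Prometheus registry. The function name is hypothetical; 'sc'
// is any implementation of selectorStatsCollector.
func exampleRegisterSelectorCacheMetrics(sc selectorStatsCollector) *prometheus.Registry {
	reg := prometheus.NewRegistry()
	// Each Collect call exports one gauge per selector class holding the
	// maximum observed selector cardinality for that class.
	reg.MustRegister(newSelectorCacheMetrics(sc))
	return reg
}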
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"io"
stdlog "log"
"strconv"
"strings"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/labels"
)
type Tracing int
const (
TRACE_DISABLED Tracing = iota
TRACE_ENABLED
TRACE_VERBOSE
)
// TraceEnabled returns true if the SearchContext requests tracing.
func (s *SearchContext) TraceEnabled() bool {
return s.Trace != TRACE_DISABLED
}
// PolicyTrace logs the given message into the SearchContext logger only if
// TRACE_ENABLED or TRACE_VERBOSE is enabled in the receiver's SearchContext.
func (s *SearchContext) PolicyTrace(format string, a ...interface{}) {
if s.TraceEnabled() {
log.Debugf(format, a...)
if s.Logging != nil {
format = "%-" + s.CallDepth() + "s" + format
a = append([]interface{}{""}, a...)
s.Logging.Printf(format, a...)
}
}
}
// PolicyTraceVerbose logs the given message into the SearchContext logger only
// if TRACE_VERBOSE is enabled in the receiver's SearchContext.
func (s *SearchContext) PolicyTraceVerbose(format string, a ...interface{}) {
switch s.Trace {
case TRACE_VERBOSE:
log.Debugf(format, a...)
if s.Logging != nil {
s.Logging.Printf(format, a...)
}
}
}
// SearchContext defines the context while evaluating policy
type SearchContext struct {
Trace Tracing
Depth int
Logging *stdlog.Logger
From labels.LabelArray
To labels.LabelArray
DPorts []*models.Port
// rulesSelect indicates that the rules being analyzed using this SearchContext
// have already been selected against From or To, allowing the per-rule
// EndpointSelector.Matches() check to be skipped, since it is costly in
// terms of performance.
rulesSelect bool
}
func (s *SearchContext) String() string {
from := make([]string, 0, len(s.From))
to := make([]string, 0, len(s.To))
dports := make([]string, 0, len(s.DPorts))
for _, fromLabel := range s.From {
from = append(from, fromLabel.String())
}
for _, toLabel := range s.To {
to = append(to, toLabel.String())
}
// We should avoid using `fmt.Sprintf()` since
// it is well known for not being optimal in terms of
// CPU and memory allocations.
// See https://github.com/cilium/cilium/issues/19571
for _, dport := range s.DPorts {
dportStr := dport.Name
if dportStr == "" {
dportStr = strconv.FormatUint(uint64(dport.Port), 10)
}
dports = append(dports, dportStr+"/"+dport.Protocol)
}
fromStr := strings.Join(from, ", ")
toStr := strings.Join(to, ", ")
if len(dports) != 0 {
dportStr := strings.Join(dports, ", ")
return "From: [" + fromStr + "] => To: [" + toStr + "] Ports: [" + dportStr + "]"
}
return "From: [" + fromStr + "] => To: [" + toStr + "]"
}
func (s *SearchContext) CallDepth() string {
return strconv.Itoa(s.Depth * 2)
}
// WithLogger returns a shallow copy of the received SearchContext with the
// logging set to write to 'log'.
func (s *SearchContext) WithLogger(log io.Writer) *SearchContext {
result := *s
result.Logging = stdlog.New(log, "", 0)
if result.Trace == TRACE_DISABLED {
result.Trace = TRACE_ENABLED
}
return &result
}
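// exampleTraceSearch is an illustrative sketch and not part of the original
// file: it shows how WithLogger is typically combined with PolicyTrace. The
// function name and the strings.Builder sink are assumptions for the example;
// WithLogger enables tracing (if it was disabled) and directs trace output to
// the given writer.
func exampleTraceSearch(from, to labels.LabelArray) string {
	var buf strings.Builder
	sc := SearchContext{From: from, To: to}
	traced := sc.WithLogger(&buf) // Trace is promoted to TRACE_ENABLED
	traced.PolicyTrace("Tracing %s\n", traced.String())
	return buf.String()
}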
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/labels"
)
func TestSearchContextString(t *testing.T) {
for expected, sc := range map[string]SearchContext{
"From: [unspec:a, unspec:b, unspec:c] => To: [unspec:d, unspec:e, unspec:f] Ports: [HTTP/TCP, HTTPs/TCP]": {
Trace: 1,
Depth: 0,
From: labels.ParseLabelArray("a", "c", "b"),
To: labels.ParseLabelArray("d", "e", "f"),
DPorts: []*models.Port{
{
Name: "HTTP",
Port: 80,
Protocol: "TCP",
},
{
Name: "HTTPs",
Port: 442,
Protocol: "TCP",
},
},
rulesSelect: false,
},
"From: [unspec:a, unspec:b, unspec:c] => To: [unspec:d, unspec:e, unspec:f] Ports: [80/TCP, 442/TCP]": {
Trace: 1,
Depth: 0,
From: labels.ParseLabelArray("a", "c", "b"),
To: labels.ParseLabelArray("d", "e", "f"),
DPorts: []*models.Port{
{
Port: 80,
Protocol: "TCP",
},
{
Port: 442,
Protocol: "TCP",
},
},
rulesSelect: false,
},
"From: [k8s:a, local:b, unspec:c] => To: [unspec:d, unspec:e, unspec:f]": {
Trace: 1,
Depth: 0,
From: labels.ParseLabelArray("k8s:a", "unspec:c", "local:b"),
To: labels.ParseLabelArray("d", "e", "f"),
rulesSelect: false,
},
} {
str := sc.String()
require.Equal(t, expected, str)
}
}
func BenchmarkSearchContextString(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
for _, sc := range []SearchContext{
{
Trace: 1,
Depth: 0,
From: labels.ParseLabelArray("a", "t", "b"),
To: labels.ParseLabelArray("d", "e", "f"),
DPorts: []*models.Port{
{
Name: "HTTP",
Port: 80,
Protocol: "TCP",
},
{
Name: "HTTPs",
Port: 442,
Protocol: "TCP",
},
},
rulesSelect: false,
},
{
Trace: 1,
Depth: 0,
From: labels.ParseLabelArray("a", "t", "b"),
To: labels.ParseLabelArray("d", "e", "f"),
DPorts: []*models.Port{
{
Port: 80,
Protocol: "TCP",
},
{
Port: 442,
Protocol: "TCP",
},
},
rulesSelect: false,
},
} {
_ = sc.String()
}
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"fmt"
"math"
"math/bits"
)
// MaskedPort is a port with a wild card mask value.
// The port range is represented by a masked port
// because we need to use masks for policy Keys
// that are indexed in the datapath by a bitwise
// longest-prefix-match trie.
type MaskedPort struct {
port uint16
mask uint16
}
func (m MaskedPort) String() string {
return fmt.Sprintf("{port: 0x%x, mask: 0x%x}", m.port, m.mask)
}
// maskedPort returns a new MaskedPort where 'wildcardBits' lowest bits are wildcarded.
func maskedPort(port uint16, wildcardBits int) MaskedPort {
mask := uint16(math.MaxUint16) << wildcardBits
return MaskedPort{port & mask, mask}
}
// PortRangeToMaskedPorts returns a slice of masked ports for the given port range.
// If the end port is equal to or less than the start port, then only the start port is
// returned as a fully masked (single) port.
// Ports are not returned in any particular order, so testing code needs to sort them
// for consistency.
func PortRangeToMaskedPorts(start uint16, end uint16) (ports []MaskedPort) {
// This is a wildcard.
if start == 0 && (end == 0 || end == math.MaxUint16) {
return []MaskedPort{{0, 0}}
}
// This is a single port.
if end <= start {
return []MaskedPort{{start, 0xffff}}
}
// Find the number of common leading bits. The first uncommon bit will be 0 for the start
// and 1 for the end.
commonBits := bits.LeadingZeros16(start ^ end)
// Cover the case where all the bits after the common bits are zeros on start and ones on
// end. In this case the range can be represented by a single masked port instead of two
// that would be produced below.
// For example, if the range is from 16-31 (0b10000 - 0b11111), then we return 0b1xxxx
// instead of 0b10xxx and 0b11xxx that would be produced when approaching the middle from
// the two sides.
//
// This also covers the trivial case where all the bits are in common (i.e., start == end).
mask := uint16(math.MaxUint16) >> commonBits
if start&mask == 0 && ^end&mask == 0 {
return []MaskedPort{maskedPort(start, 16-commonBits)}
}
// Find the "middle point" toward which the masked ports approach from both sides.
// This "middle point" is the highest bit that differs between the range start and end.
middleBit := 16 - 1 - commonBits
middle := uint16(1 << middleBit)
// Wildcard the trailing zeroes to the right of the middle bit of the range start.
// This covers the values immediately following the port range start, including the start itself.
// The middle bit is added to avoid counting zeroes past it.
bit := bits.TrailingZeros16(start | middle)
ports = append(ports, maskedPort(start, bit))
// Find all 0-bits between the trailing zeroes and the middle bit and add MaskedPorts where
// each found 0-bit is set and the lower bits are wildcarded. This covers the range from the
// start to the middle not covered by the trailing zeroes above.
// The current 'bit' is skipped since we know it is 1.
for bit++; bit < middleBit; bit++ {
if start&(1<<bit) == 0 {
// Adding 1<<bit will set the bit since we know it is not set
ports = append(ports, maskedPort(start+1<<bit, bit))
}
}
// Wildcard the trailing ones to the right of the middle bit of the range end.
// This covers the values immediately preceding and including the range end.
// The middle bit is added to avoid counting ones past it.
bit = bits.TrailingZeros16(^end | middle)
ports = append(ports, maskedPort(end, bit))
// Find all 1-bits between the trailing ones and the middle bit and add MaskedPorts where
// each found 1-bit is cleared and the lower bits are wildcarded. This covers the range from
// the end to the middle not covered by the trailing ones above.
// The current 'bit' is skipped since we know it is 0.
for bit++; bit < middleBit; bit++ {
if end&(1<<bit) != 0 {
// Subtracting 1<<bit will clear the bit since we know it is set
ports = append(ports, maskedPort(end-1<<bit, bit))
}
}
return ports
}
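// examplePortRangeDecomposition is an illustrative sketch and not part of the
// original file: it demonstrates the decomposition above on a concrete range.
// For the range 5-10 the function returns, in some order:
//
//	{port: 0x5, mask: 0xffff} // 5
//	{port: 0x6, mask: 0xfffe} // 6-7
//	{port: 0x8, mask: 0xfffe} // 8-9
//	{port: 0xa, mask: 0xffff} // 10
//
// while an aligned range such as 16-31 collapses to the single masked port
// {port: 0x10, mask: 0xfff0}.
func examplePortRangeDecomposition() []MaskedPort {
	return PortRangeToMaskedPorts(5, 10)
}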
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"fmt"
"strconv"
"strings"
"github.com/cilium/cilium/pkg/policy/trafficdirection"
"github.com/cilium/cilium/pkg/u8proto"
)
// ProxyStatsKey returns a key for endpoint's proxy stats, which may aggregate stats from multiple
// proxy redirects on the same port.
func ProxyStatsKey(ingress bool, protocol string, port, proxyPort uint16) string {
direction := "egress"
if ingress {
direction = "ingress"
}
portStr := strconv.FormatUint(uint64(port), 10)
proxyPortStr := strconv.FormatUint(uint64(proxyPort), 10)
var str strings.Builder
str.Grow(len(direction) + 1 + len(protocol) + 1 + len(portStr) + 1 + len(proxyPortStr))
str.WriteString(direction)
str.WriteRune(':')
str.WriteString(protocol)
str.WriteRune(':')
str.WriteString(portStr)
str.WriteRune(':')
str.WriteString(proxyPortStr)
return str.String()
}
// ProxyID returns a unique string to identify a proxy mapping.
func ProxyID(endpointID uint16, ingress bool, protocol string, port uint16, listener string) string {
direction := "egress"
if ingress {
direction = "ingress"
}
epStr := strconv.FormatUint(uint64(endpointID), 10)
portStr := strconv.FormatUint(uint64(port), 10)
var str strings.Builder
str.Grow(len(epStr) + 1 + len(direction) + 1 + len(protocol) + 1 + len(portStr) + 1 + len(listener))
str.WriteString(epStr)
str.WriteRune(':')
str.WriteString(direction)
str.WriteRune(':')
str.WriteString(protocol)
str.WriteRune(':')
str.WriteString(portStr)
str.WriteRune(':')
str.WriteString(listener)
return str.String()
}
// ProxyIDFromKey returns a unique string to identify a proxy mapping.
func ProxyIDFromKey(endpointID uint16, key Key, listener string) string {
return ProxyID(endpointID, key.TrafficDirection() == trafficdirection.Ingress, u8proto.U8proto(key.Nexthdr).String(), key.DestPort, listener)
}
// ParseProxyID parses a proxy ID returned by ProxyID and returns its components.
func ParseProxyID(proxyID string) (endpointID uint16, ingress bool, protocol string, port uint16, listener string, err error) {
comps := strings.Split(proxyID, ":")
if len(comps) != 5 {
err = fmt.Errorf("invalid proxy ID structure: %s", proxyID)
return
}
epID, err := strconv.ParseUint(comps[0], 10, 16)
if err != nil {
return
}
endpointID = uint16(epID)
ingress = comps[1] == "ingress"
protocol = comps[2]
l4port, err := strconv.ParseUint(comps[3], 10, 16)
if err != nil {
return
}
port = uint16(l4port)
listener = comps[4]
return
}
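// exampleProxyKeyFormats is an illustrative sketch and not part of the
// original file: it shows the string formats produced by the functions above.
// ProxyStatsKey(true, "TCP", 80, 15001) yields "ingress:TCP:80:15001", while
// ProxyID(42, true, "TCP", 80, "listener-1") yields
// "42:ingress:TCP:80:listener-1", which ParseProxyID splits back into its
// five components.
func exampleProxyKeyFormats() (endpointID uint16, ingress bool, protocol string, port uint16, listener string, err error) {
	_ = ProxyStatsKey(true, "TCP", 80, 15001) // aggregated per proxy port
	return ParseProxyID(ProxyID(42, true, "TCP", 80, "listener-1"))
}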
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"cmp"
"context"
"encoding/json"
"fmt"
"maps"
"slices"
"sync/atomic"
cilium "github.com/cilium/proxy/go/cilium/api"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/container/set"
"github.com/cilium/cilium/pkg/crypto/certificatemanager"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/identity/identitymanager"
ipcachetypes "github.com/cilium/cilium/pkg/ipcache/types"
k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/spanstat"
)
// PolicyContext is an interface policy resolution functions use to access the Repository.
// This way testing code can run without mocking a full Repository.
type PolicyContext interface {
// return the namespace in which the policy rule is being resolved
GetNamespace() string
// return the SelectorCache
GetSelectorCache() *SelectorCache
// GetTLSContext resolves the given 'api.TLSContext' into CA
// certs and the public and private keys, using secrets from
// k8s or from the local file system.
GetTLSContext(tls *api.TLSContext) (ca, public, private string, inlineSecrets bool, err error)
// GetEnvoyHTTPRules translates the given 'api.L7Rules' into
// the protobuf representation the Envoy can consume. The bool
// return parameter tells whether the rule enforcement can
// be short-circuited upon the first allowing rule. This is
// false if any of the rules has side-effects, requiring all
// such rules being evaluated.
GetEnvoyHTTPRules(l7Rules *api.L7Rules) (*cilium.HttpNetworkPolicyRules, bool)
// IsDeny returns true if the policy computation should be done for the
// policy deny case. This function returns different values depending on the
// code path as it can be changed during the policy calculation.
IsDeny() bool
// SetDeny sets the Deny field of the PolicyContext and returns the old
// value stored.
SetDeny(newValue bool) (oldValue bool)
}
type policyContext struct {
repo *Repository
ns string
// isDeny is set to true if the given policy computation should be done
// for the policy deny case.
isDeny bool
}
// GetNamespace() returns the namespace for the policy rule being resolved
func (p *policyContext) GetNamespace() string {
return p.ns
}
// GetSelectorCache() returns the selector cache used by the Repository
func (p *policyContext) GetSelectorCache() *SelectorCache {
return p.repo.GetSelectorCache()
}
// GetTLSContext() returns data for TLS Context via a CertificateManager
func (p *policyContext) GetTLSContext(tls *api.TLSContext) (ca, public, private string, inlineSecrets bool, err error) {
if p.repo.certManager == nil {
return "", "", "", false, fmt.Errorf("No Certificate Manager set on Policy Repository")
}
return p.repo.certManager.GetTLSContext(context.TODO(), tls, p.ns)
}
func (p *policyContext) GetEnvoyHTTPRules(l7Rules *api.L7Rules) (*cilium.HttpNetworkPolicyRules, bool) {
return p.repo.GetEnvoyHTTPRules(l7Rules, p.ns)
}
// IsDeny returns true if the policy computation should be done for the
// policy deny case. This function returns different values depending on the
// code path as it can be changed during the policy calculation.
func (p *policyContext) IsDeny() bool {
return p.isDeny
}
// SetDeny sets the Deny field of the PolicyContext and returns the old
// value stored.
func (p *policyContext) SetDeny(deny bool) bool {
oldDeny := p.isDeny
p.isDeny = deny
return oldDeny
}
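// exampleWithDeny is an illustrative sketch and not part of the original
// file: it shows the save-and-restore pattern SetDeny is documented for,
// temporarily switching a PolicyContext into deny mode while f runs. The
// helper name is hypothetical.
func exampleWithDeny(ctx PolicyContext, f func()) {
	oldDeny := ctx.SetDeny(true)
	defer ctx.SetDeny(oldDeny)
	f()
}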
type PolicyRepository interface {
BumpRevision() uint64
GetAuthTypes(localID identity.NumericIdentity, remoteID identity.NumericIdentity) AuthTypes
GetEnvoyHTTPRules(l7Rules *api.L7Rules, ns string) (*cilium.HttpNetworkPolicyRules, bool)
// GetSelectorPolicy computes the SelectorPolicy for a given identity.
//
// It returns nil if skipRevision is greater than or equal to the already calculated version.
// This is used to skip policy calculation when a certain revision delta is
// known to not affect the given identity. Pass a skipRevision of 0 to force
// calculation.
GetSelectorPolicy(id *identity.Identity, skipRevision uint64, stats GetPolicyStatistics) (SelectorPolicy, uint64, error)
GetRevision() uint64
GetRulesList() *models.Policy
GetSelectorCache() *SelectorCache
Iterate(f func(rule *api.Rule))
ReplaceByResource(rules api.Rules, resource ipcachetypes.ResourceID) (affectedIDs *set.Set[identity.NumericIdentity], rev uint64, oldRevCnt int)
ReplaceByLabels(rules api.Rules, searchLabelsList []labels.LabelArray) (affectedIDs *set.Set[identity.NumericIdentity], rev uint64, oldRevCnt int)
Search(lbls labels.LabelArray) (api.Rules, uint64)
SetEnvoyRulesFunc(f func(certificatemanager.SecretManager, *api.L7Rules, string, string) (*cilium.HttpNetworkPolicyRules, bool))
}
type GetPolicyStatistics interface {
WaitingForPolicyRepository() *spanstat.SpanStat
SelectorPolicyCalculation() *spanstat.SpanStat
}
// Repository is a list of policy rules which in combination form the security
// policy.
type Repository struct {
// mutex protects the whole policy tree
mutex lock.RWMutex
rules map[ruleKey]*rule
rulesByNamespace map[string]sets.Set[ruleKey]
rulesByResource map[ipcachetypes.ResourceID]map[ruleKey]*rule
// We will need a way to synthesize a rule key for rules without a resource;
// these are - in practice - very rare, as they only come from the local API,
// never via k8s.
nextID uint
// revision is the revision of the policy repository. It will be
// incremented whenever the policy repository is changed.
// Always positive (>0).
revision atomic.Uint64
// SelectorCache tracks the selectors used in the policies
// resolved from the repository.
selectorCache *SelectorCache
// PolicyCache tracks the selector policies created from this repo
policyCache *policyCache
certManager certificatemanager.CertificateManager
secretManager certificatemanager.SecretManager
getEnvoyHTTPRules func(certificatemanager.SecretManager, *api.L7Rules, string, string) (*cilium.HttpNetworkPolicyRules, bool)
metricsManager api.PolicyMetrics
}
// GetSelectorCache() returns the selector cache used by the Repository
func (p *Repository) GetSelectorCache() *SelectorCache {
return p.selectorCache
}
// GetAuthTypes returns the AuthTypes required by the policy between the localID and remoteID
func (p *Repository) GetAuthTypes(localID, remoteID identity.NumericIdentity) AuthTypes {
return p.policyCache.getAuthTypes(localID, remoteID)
}
func (p *Repository) SetEnvoyRulesFunc(f func(certificatemanager.SecretManager, *api.L7Rules, string, string) (*cilium.HttpNetworkPolicyRules, bool)) {
p.getEnvoyHTTPRules = f
}
func (p *Repository) GetEnvoyHTTPRules(l7Rules *api.L7Rules, ns string) (*cilium.HttpNetworkPolicyRules, bool) {
if p.getEnvoyHTTPRules == nil {
return nil, true
}
return p.getEnvoyHTTPRules(p.secretManager, l7Rules, ns, p.secretManager.GetSecretSyncNamespace())
}
// NewPolicyRepository creates a new policy repository.
func NewPolicyRepository(
initialIDs identity.IdentityMap,
certManager certificatemanager.CertificateManager,
secretManager certificatemanager.SecretManager,
idmgr identitymanager.IDManager,
metricsManager api.PolicyMetrics,
) *Repository {
selectorCache := NewSelectorCache(initialIDs)
repo := &Repository{
rules: make(map[ruleKey]*rule),
rulesByNamespace: make(map[string]sets.Set[ruleKey]),
rulesByResource: make(map[ipcachetypes.ResourceID]map[ruleKey]*rule),
selectorCache: selectorCache,
certManager: certManager,
secretManager: secretManager,
metricsManager: metricsManager,
}
repo.revision.Store(1)
repo.policyCache = newPolicyCache(repo, idmgr)
return repo
}
// traceState is an internal structure used to collect information
// while determining policy decision
type traceState struct {
// selectedRules is the number of rules with matching EndpointSelector
selectedRules int
// matchedRules is the number of rules that have allowed traffic
matchedRules int
// matchedDenyRules is the number of rules that have denied traffic
matchedDenyRules int
// constrainedRules counts how many "FromRequires" constraints are
// unsatisfied
constrainedRules int
// ruleID is the rule ID currently being evaluated
ruleID int
}
func (state *traceState) trace(rules int, ctx *SearchContext) {
ctx.PolicyTrace("%d/%d rules selected\n", state.selectedRules, rules)
if state.constrainedRules > 0 {
ctx.PolicyTrace("Found unsatisfied FromRequires constraint\n")
} else {
if state.matchedRules > 0 {
ctx.PolicyTrace("Found allow rule\n")
} else {
ctx.PolicyTrace("Found no allow rule\n")
}
if state.matchedDenyRules > 0 {
ctx.PolicyTrace("Found deny rule\n")
} else {
ctx.PolicyTrace("Found no deny rule\n")
}
}
}
// ResolveL4IngressPolicy resolves the L4 ingress policy for a set of endpoints
// by searching the policy repository for `PortRule` rules that are attached to
// a `Rule` where the EndpointSelector matches `ctx.To`. `ctx.From` has no effect and
// is ignored in the search. If multiple `PortRule` rules are found, all rules
// are merged together. If rules contain overlapping port definitions, the first
// rule found in the repository takes precedence.
//
// TODO: Coalesce l7 rules?
//
// Caller must release resources by calling Detach() on the returned map!
//
// NOTE: This is only called from unit tests, but from multiple packages.
func (p *Repository) ResolveL4IngressPolicy(ctx *SearchContext) (L4PolicyMap, error) {
policyCtx := policyContext{
repo: p,
ns: ctx.To.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel),
}
rules := make(ruleSlice, 0, len(p.rules))
for _, rule := range p.rules {
rules = append(rules, rule)
}
// Sort for unit tests
slices.SortFunc[ruleSlice](rules, func(a, b *rule) int {
return cmp.Compare(a.key.idx, b.key.idx)
})
result, err := rules.resolveL4IngressPolicy(&policyCtx, ctx)
if err != nil {
return nil, err
}
return result, nil
}
// ResolveL4EgressPolicy resolves the L4 egress policy for a set of endpoints
// by searching the policy repository for `PortRule` rules that are attached to
// a `Rule` where the EndpointSelector matches `ctx.From`. `ctx.To` has no effect and
// is ignored in the search. If multiple `PortRule` rules are found, all rules
// are merged together. If rules contain overlapping port definitions, the first
// rule found in the repository takes precedence.
//
// Caller must release resources by calling Detach() on the returned map!
//
// NOTE: This is only called from unit tests, but from multiple packages.
func (p *Repository) ResolveL4EgressPolicy(ctx *SearchContext) (L4PolicyMap, error) {
policyCtx := policyContext{
repo: p,
ns: ctx.From.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel),
}
rules := make(ruleSlice, 0, len(p.rules))
for _, rule := range p.rules {
rules = append(rules, rule)
}
slices.SortFunc[ruleSlice](rules, func(a, b *rule) int {
return cmp.Compare(a.key.idx, b.key.idx)
})
result, err := rules.resolveL4EgressPolicy(&policyCtx, ctx)
if err != nil {
return nil, err
}
return result, nil
}
// AllowsIngressRLocked evaluates the policy repository for the provided search
// context and returns the verdict for ingress. If no matching policy allows for
// the connection, the request will be denied. The policy repository mutex must
// be held.
//
// NOTE: This is only called from unit tests, but from multiple packages.
func (p *Repository) AllowsIngressRLocked(ctx *SearchContext) api.Decision {
// Lack of DPorts in the SearchContext means L3-only search
if len(ctx.DPorts) == 0 {
newCtx := *ctx
newCtx.DPorts = []*models.Port{{
Port: 0,
Protocol: models.PortProtocolANY,
}}
ctx = &newCtx
}
ctx.PolicyTrace("Tracing %s", ctx.String())
ingressPolicy, err := p.ResolveL4IngressPolicy(ctx)
if err != nil {
log.WithError(err).Warn("Evaluation error while resolving L4 ingress policy")
}
verdict := api.Denied
if err == nil && ingressPolicy.Len() > 0 {
verdict = ingressPolicy.IngressCoversContext(ctx)
}
ctx.PolicyTrace("Ingress verdict: %s", verdict.String())
ingressPolicy.Detach(p.GetSelectorCache())
return verdict
}
// AllowsEgressRLocked evaluates the policy repository for the provided search
// context and returns the verdict. If no matching policy allows for the
// connection, the request will be denied. The policy repository mutex must be
// held.
//
// NOTE: This is only called from unit tests, but from multiple packages.
func (p *Repository) AllowsEgressRLocked(ctx *SearchContext) api.Decision {
// Lack of DPorts in the SearchContext means L3-only search
if len(ctx.DPorts) == 0 {
newCtx := *ctx
newCtx.DPorts = []*models.Port{{
Port: 0,
Protocol: models.PortProtocolANY,
}}
ctx = &newCtx
}
ctx.PolicyTrace("Tracing %s\n", ctx.String())
egressPolicy, err := p.ResolveL4EgressPolicy(ctx)
if err != nil {
log.WithError(err).Warn("Evaluation error while resolving L4 egress policy")
}
verdict := api.Denied
if err == nil && egressPolicy.Len() > 0 {
verdict = egressPolicy.EgressCoversContext(ctx)
}
ctx.PolicyTrace("Egress verdict: %s", verdict.String())
egressPolicy.Detach(p.GetSelectorCache())
return verdict
}
func (p *Repository) Search(lbls labels.LabelArray) (api.Rules, uint64) {
p.mutex.RLock()
defer p.mutex.RUnlock()
return p.searchRLocked(lbls), p.GetRevision()
}
// searchRLocked searches the policy repository for rules which match the
// specified labels and will return an array of all rules which matched.
func (p *Repository) searchRLocked(lbls labels.LabelArray) api.Rules {
result := api.Rules{}
for _, r := range p.rules {
if r.Labels.Contains(lbls) {
result = append(result, &r.Rule)
}
}
return result
}
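// exampleSearchByLabel is an illustrative sketch and not part of the original
// file: Search (and searchRLocked) return every rule whose Labels contain all
// of the given labels, together with the repository revision. The label value
// used here is arbitrary.
func exampleSearchByLabel(repo *Repository) (api.Rules, uint64) {
	return repo.Search(labels.LabelArray{labels.ParseLabel("k8s:tag1")})
}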
// addListLocked inserts rules into the policy repository with the repository already locked.
// Expects that the entire rule list has already been sanitized.
//
// Only used by unit tests, but by multiple packages.
func (p *Repository) addListLocked(rules api.Rules) (ruleSlice, uint64) {
newRules := make(ruleSlice, 0, len(rules))
for _, r := range rules {
newRule := p.newRule(*r, ruleKey{idx: p.nextID})
newRules = append(newRules, newRule)
p.insert(newRule)
p.nextID++
}
return newRules, p.BumpRevision()
}
func (p *Repository) insert(r *rule) {
p.rules[r.key] = r
p.metricsManager.AddRule(r.Rule)
if _, ok := p.rulesByNamespace[r.key.resource.Namespace()]; !ok {
p.rulesByNamespace[r.key.resource.Namespace()] = sets.New[ruleKey]()
}
p.rulesByNamespace[r.key.resource.Namespace()].Insert(r.key)
rid := r.key.resource
if len(rid) > 0 {
if p.rulesByResource[rid] == nil {
p.rulesByResource[rid] = map[ruleKey]*rule{}
}
p.rulesByResource[rid][r.key] = r
}
metrics.Policy.Inc()
}
func (p *Repository) del(key ruleKey) {
r := p.rules[key]
if r == nil {
return
}
p.metricsManager.DelRule(r.Rule)
delete(p.rules, key)
p.rulesByNamespace[key.resource.Namespace()].Delete(key)
if len(p.rulesByNamespace[key.resource.Namespace()]) == 0 {
delete(p.rulesByNamespace, key.resource.Namespace())
}
rid := key.resource
if len(rid) > 0 && p.rulesByResource[rid] != nil {
delete(p.rulesByResource[rid], key)
if len(p.rulesByResource[rid]) == 0 {
delete(p.rulesByResource, rid)
}
}
metrics.Policy.Dec()
}
// newRule allocates a CachedSelector for a given rule.
func (p *Repository) newRule(apiRule api.Rule, key ruleKey) *rule {
r := &rule{
Rule: apiRule,
key: key,
}
r.subjectSelector, _ = p.selectorCache.AddIdentitySelector(r, makeStringLabels(r.Labels), *r.getSelector())
return r
}
// releaseRule releases the cached selector for a given rule.
func (p *Repository) releaseRule(r *rule) {
if r.subjectSelector != nil {
p.selectorCache.RemoveSelector(r.subjectSelector, r)
}
}
// MustAddList inserts rules into the policy repository. It is used for
// unit-testing purposes only. Panics if any rule is invalid.
func (p *Repository) MustAddList(rules api.Rules) (ruleSlice, uint64) {
for i := range rules {
err := rules[i].Sanitize()
if err != nil {
panic(err)
}
}
p.mutex.Lock()
defer p.mutex.Unlock()
return p.addListLocked(rules)
}
// Iterate iterates the policy repository, calling f for each rule. It is safe
// to execute Iterate concurrently.
func (p *Repository) Iterate(f func(rule *api.Rule)) {
p.mutex.RWMutex.Lock()
defer p.mutex.RWMutex.Unlock()
for _, r := range p.rules {
f(&r.Rule)
}
}
// JSONMarshalRules returns a slice of policy rules as string in JSON
// representation
func JSONMarshalRules(rules api.Rules) string {
b, err := json.MarshalIndent(rules, "", " ")
if err != nil {
return err.Error()
}
return string(b)
}
// GetRevision returns the revision of the policy repository
func (p *Repository) GetRevision() uint64 {
return p.revision.Load()
}
// BumpRevision allows forcing policy regeneration
func (p *Repository) BumpRevision() uint64 {
metrics.PolicyRevision.Inc()
return p.revision.Add(1)
}
// GetRulesList returns the current policy
func (p *Repository) GetRulesList() *models.Policy {
p.mutex.RLock()
defer p.mutex.RUnlock()
lbls := labels.ParseSelectLabelArrayFromArray([]string{})
ruleList := p.searchRLocked(lbls)
return &models.Policy{
Revision: int64(p.GetRevision()),
Policy: JSONMarshalRules(ruleList),
}
}
// resolvePolicyLocked returns the selectorPolicy for the provided
// identity from the set of rules in the repository. If the policy
// cannot be generated due to conflicts at L4 or L7, returns an error.
//
// Must be performed while holding the Repository lock.
func (p *Repository) resolvePolicyLocked(securityIdentity *identity.Identity) (*selectorPolicy, error) {
// First obtain whether policy applies in both traffic directions, as well
// as list of rules which actually select this endpoint. This allows us
// to not have to iterate through the entire rule list multiple times and
// perform the matching decision again when computing policy for each
// protocol layer, which is quite costly in terms of performance.
ingressEnabled, egressEnabled,
matchingRules := p.computePolicyEnforcementAndRules(securityIdentity)
calculatedPolicy := &selectorPolicy{
Revision: p.GetRevision(),
SelectorCache: p.GetSelectorCache(),
L4Policy: NewL4Policy(p.GetRevision()),
IngressPolicyEnabled: ingressEnabled,
EgressPolicyEnabled: egressEnabled,
}
lbls := securityIdentity.LabelArray
ingressCtx := SearchContext{
To: lbls,
rulesSelect: true,
}
egressCtx := SearchContext{
From: lbls,
rulesSelect: true,
}
if option.Config.TracingEnabled() {
ingressCtx.Trace = TRACE_ENABLED
egressCtx.Trace = TRACE_ENABLED
}
policyCtx := policyContext{
repo: p,
ns: lbls.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel),
}
if ingressEnabled {
newL4IngressPolicy, err := matchingRules.resolveL4IngressPolicy(&policyCtx, &ingressCtx)
if err != nil {
return nil, err
}
calculatedPolicy.L4Policy.Ingress.PortRules = newL4IngressPolicy
}
if egressEnabled {
newL4EgressPolicy, err := matchingRules.resolveL4EgressPolicy(&policyCtx, &egressCtx)
if err != nil {
return nil, err
}
calculatedPolicy.L4Policy.Egress.PortRules = newL4EgressPolicy
}
// Make the calculated policy ready for incremental updates
calculatedPolicy.Attach(&policyCtx)
return calculatedPolicy, nil
}
// computePolicyEnforcementAndRules returns whether policy applies at ingress or egress
// for the given security identity, as well as a list of any rules which select
// the set of labels of the given security identity.
//
// Must be called with repo mutex held for reading.
func (p *Repository) computePolicyEnforcementAndRules(securityIdentity *identity.Identity) (
ingress, egress bool,
matchingRules ruleSlice,
) {
lbls := securityIdentity.LabelArray
// Check if policy enforcement should be enabled at the daemon level.
if lbls.Has(labels.IDNameHost) && !option.Config.EnableHostFirewall {
return false, false, nil
}
policyMode := GetPolicyEnabled()
// If policy enforcement isn't enabled, we do not enable policy
// enforcement for the endpoint. We don't care about returning any
// rules that match.
if policyMode == option.NeverEnforce {
return false, false, nil
}
matchingRules = []*rule{}
// Match cluster-wide rules
for rKey := range p.rulesByNamespace[""] {
r := p.rules[rKey]
if r.matchesSubject(securityIdentity) {
matchingRules = append(matchingRules, r)
}
}
// Match namespace-specific rules
namespace := lbls.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel)
if namespace != "" {
for rKey := range p.rulesByNamespace[namespace] {
r := p.rules[rKey]
if r.matchesSubject(securityIdentity) {
matchingRules = append(matchingRules, r)
}
}
}
// If policy enforcement is enabled for the daemon, then it has to be
// enabled for the endpoint.
// If the endpoint has the reserved:init label, i.e. if it has not yet
// received any labels, always enforce policy (default deny).
if policyMode == option.AlwaysEnforce || lbls.Has(labels.IDNameInit) {
return true, true, matchingRules
}
// Determine the default policy for each direction.
//
// By default, endpoints have no policy and all traffic is allowed.
// If any rules select the endpoint, then the endpoint switches to a
// default-deny mode (same as traffic being enabled), per-direction.
//
// Rules, however, can optionally be configured to not enable default deny mode.
// If no rules enable default-deny, then all traffic is allowed except that explicitly
// denied by a Deny rule.
//
// There are three possible cases _per direction_:
// 1: No rules are present,
// 2: At least one default-deny rule is present. Then, policy is enabled
// 3: Only non-default-deny rules are present. Then, policy is enabled, but we must insert
// an additional allow-all rule. We must do this, even if all traffic is allowed, because
// rules may have additional effects such as enabling L7 proxy.
hasIngressDefaultDeny := false
hasEgressDefaultDeny := false
for _, r := range matchingRules {
if !ingress || !hasIngressDefaultDeny { // short-circuit len()
if len(r.Ingress) > 0 || len(r.IngressDeny) > 0 {
ingress = true
if *r.EnableDefaultDeny.Ingress {
hasIngressDefaultDeny = true
}
}
}
if !egress || !hasEgressDefaultDeny { // short-circuit len()
if len(r.Egress) > 0 || len(r.EgressDeny) > 0 {
egress = true
if *r.EnableDefaultDeny.Egress {
hasEgressDefaultDeny = true
}
}
}
if ingress && egress && hasIngressDefaultDeny && hasEgressDefaultDeny {
break
}
}
// If there are only ingress default-allow rules, then insert a wildcard rule
if !hasIngressDefaultDeny && ingress {
log.WithField(logfields.Identity, securityIdentity).Debug("Only default-allow policies, synthesizing ingress wildcard-allow rule")
matchingRules = append(matchingRules, wildcardRule(securityIdentity.LabelArray, true /*ingress*/))
}
// Same for egress -- synthesize a wildcard rule
if !hasEgressDefaultDeny && egress {
log.WithField(logfields.Identity, securityIdentity).Debug("Only default-allow policies, synthesizing egress wildcard-allow rule")
matchingRules = append(matchingRules, wildcardRule(securityIdentity.LabelArray, false /*egress*/))
}
return
}
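// exampleDefaultAllowRule is an illustrative sketch and not part of the
// original file: it constructs a rule matching case 3 above, i.e. one that
// selects an endpoint but opts out of ingress default deny, so ingress policy
// is enabled and a wildcard allow-all rule is synthesized. It assumes the
// EnableDefaultDeny configuration dereferenced above is settable as shown.
func exampleDefaultAllowRule(selector api.EndpointSelector) api.Rule {
	no := false
	r := api.Rule{
		EndpointSelector: selector,
		Ingress: []api.IngressRule{{
			IngressCommonRule: api.IngressCommonRule{
				FromEntities: []api.Entity{api.EntityAll},
			},
		}},
	}
	r.EnableDefaultDeny.Ingress = &no // opt out of ingress default deny
	return r
}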
// wildcardRule generates a wildcard rule that only selects the given identity.
func wildcardRule(lbls labels.LabelArray, ingress bool) *rule {
r := &rule{}
if ingress {
r.Ingress = []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEntities: []api.Entity{api.EntityAll},
},
},
}
} else {
r.Egress = []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEntities: []api.Entity{api.EntityAll},
},
},
}
}
es := api.NewESFromLabels(lbls...)
if lbls.Has(labels.IDNameHost) {
r.NodeSelector = es
} else {
r.EndpointSelector = es
}
_ = r.Sanitize()
return r
}
// GetSelectorPolicy computes the SelectorPolicy for a given identity.
//
// It returns nil if skipRevision is greater than or equal to the already calculated version.
// This is used to skip policy calculation when a certain revision delta is
// known to not affect the given identity. Pass a skipRevision of 0 to force
// calculation.
func (r *Repository) GetSelectorPolicy(id *identity.Identity, skipRevision uint64, stats GetPolicyStatistics) (SelectorPolicy, uint64, error) {
stats.WaitingForPolicyRepository().Start()
r.mutex.RLock()
defer r.mutex.RUnlock()
stats.WaitingForPolicyRepository().End(true)
rev := r.GetRevision()
// Do we already have a given revision?
// If so, skip calculation.
if skipRevision >= rev {
return nil, rev, nil
}
stats.SelectorPolicyCalculation().Start()
// This may call back in to the (locked) repository to generate the
// selector policy
sp, updated, err := r.policyCache.updateSelectorPolicy(id)
stats.SelectorPolicyCalculation().EndError(err)
// If we hit cache, reset the statistics.
if !updated {
stats.SelectorPolicyCalculation().Reset()
}
return sp, rev, err
}
// ReplaceByResource replaces all rules by resource, returning the complete set of affected endpoints.
func (p *Repository) ReplaceByResource(rules api.Rules, resource ipcachetypes.ResourceID) (affectedIDs *set.Set[identity.NumericIdentity], rev uint64, oldRuleCnt int) {
if len(resource) == 0 {
// This should never ever be hit, as the caller should have already validated the resource.
// Out of paranoia, do nothing.
log.Error("Attempt to replace rules by resource with an empty resource.")
return
}
p.mutex.Lock()
defer p.mutex.Unlock()
affectedIDs = &set.Set[identity.NumericIdentity]{}
oldRules := maps.Clone(p.rulesByResource[resource]) // need to clone as `p.del()` mutates this
for key, oldRule := range oldRules {
for _, subj := range oldRule.getSubjects() {
affectedIDs.Insert(subj)
}
p.del(key)
}
if len(rules) > 0 {
p.rulesByResource[resource] = make(map[ruleKey]*rule, len(rules))
for i, r := range rules {
newRule := p.newRule(*r, ruleKey{resource: resource, idx: uint(i)})
p.insert(newRule)
for _, subj := range newRule.getSubjects() {
affectedIDs.Insert(subj)
}
}
}
// Now that selectors have been allocated for new rules,
// we may release the old ones.
for _, r := range oldRules {
p.releaseRule(r)
}
return affectedIDs, p.BumpRevision(), len(oldRules)
}
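// exampleRemoveResourceRules is an illustrative sketch and not part of the
// original file: passing an empty rule set to ReplaceByResource removes all
// rules previously associated with the resource and returns the identities
// affected by the removal. The helper name is hypothetical.
func exampleRemoveResourceRules(p *Repository, resource ipcachetypes.ResourceID) (*set.Set[identity.NumericIdentity], uint64) {
	affected, rev, _ := p.ReplaceByResource(nil, resource)
	return affected, rev
}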
// ReplaceByLabels implements the somewhat awkward REST local API for providing network policy,
// where the "key" is a list of labels, possibly multiple, that should be removed before
// installing the new rules.
func (p *Repository) ReplaceByLabels(rules api.Rules, searchLabelsList []labels.LabelArray) (affectedIDs *set.Set[identity.NumericIdentity], rev uint64, oldRuleCnt int) {
p.mutex.Lock()
defer p.mutex.Unlock()
var oldRules []*rule
affectedIDs = &set.Set[identity.NumericIdentity]{}
// determine the outgoing (to-be-removed) rules
for ruleKey, rule := range p.rules {
for _, searchLabels := range searchLabelsList {
if rule.Labels.Contains(searchLabels) {
p.del(ruleKey)
oldRules = append(oldRules, rule)
break
}
}
}
// Insert new rules, allocating a subject selector
for _, r := range rules {
newRule := p.newRule(*r, ruleKey{idx: p.nextID})
p.insert(newRule)
p.nextID++
for _, nid := range newRule.getSubjects() {
affectedIDs.Insert(nid)
}
}
// Now that subject selectors have been allocated, release the old rules.
for _, oldRule := range oldRules {
for _, nid := range oldRule.getSubjects() {
affectedIDs.Insert(nid)
}
p.releaseRule(oldRule)
}
return affectedIDs, p.BumpRevision(), len(oldRules)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"bytes"
"fmt"
stdlog "log"
"sync"
"testing"
"github.com/cilium/proxy/pkg/policy/api/kafka"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/identity"
ipcachetypes "github.com/cilium/cilium/pkg/ipcache/types"
k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
)
// mustAdd inserts a rule into the policy repository.
// This is just a helper function for unit testing.
// The error return value exists only to match the expected signature.
func (p *Repository) mustAdd(r api.Rule) (uint64, map[uint16]struct{}, error) {
p.mutex.Lock()
defer p.mutex.Unlock()
if err := r.Sanitize(); err != nil {
panic(err)
}
newList := make([]*api.Rule, 1)
newList[0] = &r
_, rev := p.addListLocked(newList)
return rev, map[uint16]struct{}{}, nil
}
func TestComputePolicyEnforcementAndRules(t *testing.T) {
// Cache policy enforcement value from when the test was run to avoid pollution
// across tests.
oldPolicyEnable := GetPolicyEnabled()
defer SetPolicyEnabled(oldPolicyEnable)
SetPolicyEnabled(option.DefaultEnforcement)
td := newTestData()
repo := td.repo
fooSelectLabel := labels.ParseSelectLabel("foo")
fooNumericIdentity := 9001
fooIdentity := identity.NewIdentity(identity.NumericIdentity(fooNumericIdentity), lbls)
td.addIdentity(fooIdentity)
fooIngressRule1Label := labels.NewLabel(k8sConst.PolicyLabelName, "fooIngressRule1", labels.LabelSourceAny)
fooIngressRule2Label := labels.NewLabel(k8sConst.PolicyLabelName, "fooIngressRule2", labels.LabelSourceAny)
fooEgressRule1Label := labels.NewLabel(k8sConst.PolicyLabelName, "fooEgressRule1", labels.LabelSourceAny)
fooEgressRule2Label := labels.NewLabel(k8sConst.PolicyLabelName, "fooEgressRule2", labels.LabelSourceAny)
combinedLabel := labels.NewLabel(k8sConst.PolicyLabelName, "combined", labels.LabelSourceAny)
initIdentity := identity.LookupReservedIdentity(identity.ReservedIdentityInit)
// lal takes a single label and returns a []labels.LabelArray containing only that label
lal := func(lbl labels.Label) []labels.LabelArray {
return []labels.LabelArray{{lbl}}
}
fooIngressRule1 := api.Rule{
EndpointSelector: api.NewESFromLabels(fooSelectLabel),
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{
api.NewESFromLabels(fooSelectLabel),
},
},
},
},
Labels: labels.LabelArray{
fooIngressRule1Label,
},
}
fooIngressRule1.Sanitize()
fooIngressRule2 := api.Rule{
EndpointSelector: api.NewESFromLabels(fooSelectLabel),
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{
api.NewESFromLabels(fooSelectLabel),
},
},
},
},
Labels: labels.LabelArray{
fooIngressRule2Label,
},
}
fooIngressRule2.Sanitize()
fooEgressRule1 := api.Rule{
EndpointSelector: api.NewESFromLabels(fooSelectLabel),
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{
api.NewESFromLabels(fooSelectLabel),
},
},
},
},
Labels: labels.LabelArray{
fooEgressRule1Label,
},
}
fooEgressRule1.Sanitize()
fooEgressRule2 := api.Rule{
EndpointSelector: api.NewESFromLabels(fooSelectLabel),
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{
api.NewESFromLabels(fooSelectLabel),
},
},
},
},
Labels: labels.LabelArray{
fooEgressRule2Label,
},
}
fooEgressRule2.Sanitize()
combinedRule := api.Rule{
EndpointSelector: api.NewESFromLabels(fooSelectLabel),
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{
api.NewESFromLabels(fooSelectLabel),
},
},
},
},
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{
api.NewESFromLabels(fooSelectLabel),
},
},
},
},
Labels: labels.LabelArray{
combinedLabel,
},
}
combinedRule.Sanitize()
ing, egr, matchingRules := repo.computePolicyEnforcementAndRules(fooIdentity)
require.False(t, ing, "ingress policy enforcement should not apply since no rules are in repository")
require.False(t, egr, "egress policy enforcement should not apply since no rules are in repository")
require.EqualValues(t, ruleSlice{}, matchingRules, "returned matching rules did not match")
_, _, err := repo.mustAdd(fooIngressRule1)
require.NoError(t, err, "unable to add rule to policy repository")
ing, egr, matchingRules = repo.computePolicyEnforcementAndRules(fooIdentity)
require.True(t, ing, "ingress policy enforcement should apply since ingress rule selects")
require.False(t, egr, "egress policy enforcement should not apply since no egress rules select")
require.EqualValues(t, fooIngressRule1, matchingRules[0].Rule, "returned matching rules did not match")
_, _, err = repo.mustAdd(fooIngressRule2)
require.NoError(t, err, "unable to add rule to policy repository")
ing, egr, matchingRules = repo.computePolicyEnforcementAndRules(fooIdentity)
require.True(t, ing, "ingress policy enforcement should apply since ingress rule selects")
require.False(t, egr, "egress policy enforcement should not apply since no egress rules select")
require.ElementsMatch(t, matchingRules.AsPolicyRules(), api.Rules{&fooIngressRule1, &fooIngressRule2})
_, _, numDeleted := repo.ReplaceByLabels(nil, lal(fooIngressRule1Label))
require.Equal(t, 1, numDeleted)
require.NoError(t, err, "unable to add rule to policy repository")
ing, egr, matchingRules = repo.computePolicyEnforcementAndRules(fooIdentity)
require.True(t, ing, "ingress policy enforcement should apply since ingress rule selects")
require.False(t, egr, "egress policy enforcement should not apply since no egress rules select")
require.EqualValues(t, fooIngressRule2, matchingRules[0].Rule, "returned matching rules did not match")
_, _, numDeleted = repo.ReplaceByLabels(nil, lal(fooIngressRule2Label))
require.Equal(t, 1, numDeleted)
ing, egr, matchingRules = repo.computePolicyEnforcementAndRules(fooIdentity)
require.False(t, ing, "ingress policy enforcement should not apply since no rules are in repository")
require.False(t, egr, "egress policy enforcement should not apply since no rules are in repository")
require.EqualValues(t, ruleSlice{}, matchingRules, "returned matching rules did not match")
_, _, err = repo.mustAdd(fooEgressRule1)
require.NoError(t, err, "unable to add rule to policy repository")
ing, egr, matchingRules = repo.computePolicyEnforcementAndRules(fooIdentity)
require.False(t, ing, "ingress policy enforcement should not apply since no ingress rules select")
require.True(t, egr, "egress policy enforcement should apply since egress rules select")
require.EqualValues(t, fooEgressRule1, matchingRules[0].Rule, "returned matching rules did not match")
_, _, numDeleted = repo.ReplaceByLabels(nil, lal(fooEgressRule1Label))
require.Equal(t, 1, numDeleted)
_, _, err = repo.mustAdd(fooEgressRule2)
require.NoError(t, err, "unable to add rule to policy repository")
ing, egr, matchingRules = repo.computePolicyEnforcementAndRules(fooIdentity)
require.False(t, ing, "ingress policy enforcement should not apply since no ingress rules select")
require.True(t, egr, "egress policy enforcement should apply since egress rules select")
require.EqualValues(t, fooEgressRule2, matchingRules[0].Rule, "returned matching rules did not match")
_, _, numDeleted = repo.ReplaceByLabels(nil, lal(fooEgressRule2Label))
require.Equal(t, 1, numDeleted)
_, _, err = repo.mustAdd(combinedRule)
require.NoError(t, err, "unable to add rule to policy repository")
ing, egr, matchingRules = repo.computePolicyEnforcementAndRules(fooIdentity)
require.True(t, ing, "ingress policy enforcement should apply since ingress rule selects")
require.True(t, egr, "egress policy enforcement should apply since egress rules selects")
require.EqualValues(t, combinedRule, matchingRules[0].Rule, "returned matching rules did not match")
_, _, numDeleted = repo.ReplaceByLabels(nil, lal(combinedLabel))
require.Equal(t, 1, numDeleted)
SetPolicyEnabled(option.AlwaysEnforce)
require.NoError(t, err, "unable to add rule to policy repository")
ing, egr, matchingRules = repo.computePolicyEnforcementAndRules(fooIdentity)
require.True(t, ing, "ingress policy enforcement should apply since ingress rule selects")
require.True(t, egr, "egress policy enforcement should apply since egress rules selects")
require.EqualValues(t, ruleSlice{}, matchingRules, "returned matching rules did not match")
SetPolicyEnabled(option.NeverEnforce)
_, _, err = repo.mustAdd(combinedRule)
require.NoError(t, err, "unable to add rule to policy repository")
ing, egr, matchingRules = repo.computePolicyEnforcementAndRules(fooIdentity)
require.False(t, ing, "ingress policy enforcement should not apply since policy enforcement is disabled ")
require.False(t, egr, "egress policy enforcement should not apply since policy enforcement is disabled")
require.Nil(t, matchingRules, "no rules should be returned since policy enforcement is disabled")
// Test init identity.
SetPolicyEnabled(option.DefaultEnforcement)
// If the mode is "default", check that the policy is always enforced for
// endpoints with the reserved:init label. If no policy rules match
// reserved:init, this drops all ingress and egress traffic.
ingress, egress, matchingRules := repo.computePolicyEnforcementAndRules(initIdentity)
require.True(t, ingress)
require.True(t, egress)
require.EqualValues(t, ruleSlice{}, matchingRules, "no rules should be returned since policy enforcement is disabled")
// Check that the "always" and "never" modes are not affected.
SetPolicyEnabled(option.AlwaysEnforce)
ingress, egress, _ = repo.computePolicyEnforcementAndRules(initIdentity)
require.True(t, ingress)
require.True(t, egress)
SetPolicyEnabled(option.NeverEnforce)
ingress, egress, _ = repo.computePolicyEnforcementAndRules(initIdentity)
require.False(t, ingress)
require.False(t, egress)
}
func BenchmarkParseLabel(b *testing.B) {
td := newTestData()
repo := td.repo
b.ResetTimer()
var err error
var cntAdd, cntFound int
lbls := make([]labels.LabelArray, 100)
for i := 0; i < 100; i++ {
I := fmt.Sprintf("%d", i)
lbls[i] = labels.LabelArray{labels.NewLabel("tag3", I, labels.LabelSourceK8s), labels.NewLabel("namespace", "default", labels.LabelSourceK8s)}
}
for i := 0; i < b.N; i++ {
for j := 0; j < 100; j++ {
J := fmt.Sprintf("%d", j)
_, _, err = repo.mustAdd(api.Rule{
EndpointSelector: api.NewESFromLabels(labels.NewLabel("foo", J, labels.LabelSourceK8s), labels.NewLabel("namespace", "default", labels.LabelSourceK8s)),
Labels: labels.LabelArray{
labels.ParseLabel("k8s:tag1"),
labels.NewLabel("namespace", "default", labels.LabelSourceK8s),
labels.NewLabel("tag3", J, labels.LabelSourceK8s),
},
})
if err == nil {
cntAdd++
}
}
repo.mutex.RLock()
for j := 0; j < 100; j++ {
cntFound += len(repo.searchRLocked(lbls[j]))
}
repo.mutex.RUnlock()
}
b.Log("Added: ", cntAdd)
b.Log("found: ", cntFound)
}
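// TestAllowsIngress checks AllowsIngressRLocked: with no rules loaded all
// ingress is denied, FromEndpoints rules allow the selected peers, and a
// FromRequires rule restricts traffic within the "groupA" label.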
func TestAllowsIngress(t *testing.T) {
td := newTestData()
repo := td.repo
fooToBar := &SearchContext{
From: labels.ParseSelectLabelArray("foo"),
To: labels.ParseSelectLabelArray("bar"),
}
repo.mutex.RLock()
// no rules loaded: Allows() => denied
require.Equal(t, api.Denied, repo.AllowsIngressRLocked(fooToBar))
repo.mutex.RUnlock()
tag1 := labels.LabelArray{labels.ParseLabel("tag1")}
rule1 := api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{
api.NewESFromLabels(labels.ParseSelectLabel("foo")),
},
},
},
},
Labels: tag1,
}
// selector: groupA
// require: groupA
rule2 := api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("groupA")),
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromRequires: []api.EndpointSelector{
api.NewESFromLabels(labels.ParseSelectLabel("groupA")),
},
},
},
},
Labels: tag1,
}
rule3 := api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar2")),
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{
api.NewESFromLabels(labels.ParseSelectLabel("foo")),
},
},
},
},
Labels: tag1,
}
_, _, err := repo.mustAdd(rule1)
require.NoError(t, err)
_, _, err = repo.mustAdd(rule2)
require.NoError(t, err)
_, _, err = repo.mustAdd(rule3)
require.NoError(t, err)
// foo=>bar is OK
require.Equal(t, api.Allowed, repo.AllowsIngressRLocked(fooToBar))
// foo=>bar2 is OK
require.Equal(t, api.Allowed, repo.AllowsIngressRLocked(&SearchContext{
From: labels.ParseSelectLabelArray("foo"),
To: labels.ParseSelectLabelArray("bar2"),
}))
// foo=>bar inside groupA is OK
require.Equal(t, api.Allowed, repo.AllowsIngressRLocked(&SearchContext{
From: labels.ParseSelectLabelArray("foo", "groupA"),
To: labels.ParseSelectLabelArray("bar", "groupA"),
}))
// groupB can't talk to groupA => Denied
require.Equal(t, api.Denied, repo.AllowsIngressRLocked(&SearchContext{
From: labels.ParseSelectLabelArray("foo", "groupB"),
To: labels.ParseSelectLabelArray("bar", "groupA"),
}))
// no restriction on groupB, unused label => OK
require.Equal(t, api.Allowed, repo.AllowsIngressRLocked(&SearchContext{
From: labels.ParseSelectLabelArray("foo", "groupB"),
To: labels.ParseSelectLabelArray("bar", "groupB"),
}))
// foo=>bar3, no rule => Denied
require.Equal(t, api.Denied, repo.AllowsIngressRLocked(&SearchContext{
From: labels.ParseSelectLabelArray("foo"),
To: labels.ParseSelectLabelArray("bar3"),
}))
}
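// TestAllowsEgress mirrors TestAllowsIngress for AllowsEgressRLocked, covering
// ToEndpoints allows and a ToRequires restriction on the "groupA" label.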
func TestAllowsEgress(t *testing.T) {
td := newTestData()
repo := td.repo
fooToBar := &SearchContext{
From: labels.ParseSelectLabelArray("foo"),
To: labels.ParseSelectLabelArray("bar"),
}
repo.mutex.RLock()
// no rules loaded: Allows() => denied
require.Equal(t, api.Denied, repo.AllowsEgressRLocked(fooToBar))
repo.mutex.RUnlock()
tag1 := labels.LabelArray{labels.ParseLabel("tag1")}
rule1 := api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("foo")),
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{
api.NewESFromLabels(labels.ParseSelectLabel("bar")),
},
},
},
},
Labels: tag1,
}
// selector: groupA
// require: groupA
rule2 := api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("groupA")),
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToRequires: []api.EndpointSelector{
api.NewESFromLabels(labels.ParseSelectLabel("groupA")),
},
},
},
},
Labels: tag1,
}
rule3 := api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("foo")),
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{
api.NewESFromLabels(labels.ParseSelectLabel("bar2")),
},
},
},
},
Labels: tag1,
}
_, _, err := repo.mustAdd(rule1)
require.NoError(t, err)
_, _, err = repo.mustAdd(rule2)
require.NoError(t, err)
_, _, err = repo.mustAdd(rule3)
require.NoError(t, err)
// foo=>bar is OK
logBuffer := new(bytes.Buffer)
result := repo.AllowsEgressRLocked(fooToBar.WithLogger(logBuffer))
if !assert.EqualValues(t, api.Allowed, result) {
t.Logf("%s", logBuffer.String())
t.Errorf("Resolved policy did not match expected: \n%s", err)
}
// foo=>bar2 is OK
require.Equal(t, api.Allowed, repo.AllowsEgressRLocked(&SearchContext{
From: labels.ParseSelectLabelArray("foo"),
To: labels.ParseSelectLabelArray("bar2"),
}))
// foo=>bar inside groupA is OK
require.Equal(t, api.Allowed, repo.AllowsEgressRLocked(&SearchContext{
From: labels.ParseSelectLabelArray("foo", "groupA"),
To: labels.ParseSelectLabelArray("bar", "groupA"),
}))
buffer := new(bytes.Buffer)
// groupB can't talk to groupA => Denied
ctx := &SearchContext{
To: labels.ParseSelectLabelArray("foo", "groupB"),
From: labels.ParseSelectLabelArray("bar", "groupA"),
Logging: stdlog.New(buffer, "", 0),
Trace: TRACE_VERBOSE,
}
verdict := repo.AllowsEgressRLocked(ctx)
require.Equal(t, api.Denied, verdict)
// no restriction on groupB, unused label => OK
require.Equal(t, api.Allowed, repo.AllowsEgressRLocked(&SearchContext{
From: labels.ParseSelectLabelArray("foo", "groupB"),
To: labels.ParseSelectLabelArray("bar", "groupB"),
}))
// foo=>bar3, no rule => Denied
require.Equal(t, api.Denied, repo.AllowsEgressRLocked(&SearchContext{
From: labels.ParseSelectLabelArray("foo"),
To: labels.ParseSelectLabelArray("bar3"),
}))
}
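// TestWildcardL3RulesIngress verifies that an L3-only ingress allow for one
// peer coexists with Kafka, HTTP, custom L7 and ICMP rules for another peer
// when resolving the L4 ingress policy.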
func TestWildcardL3RulesIngress(t *testing.T) {
td := newTestData()
repo := td.repo
labelsL3 := labels.LabelArray{labels.ParseLabel("L3")}
labelsKafka := labels.LabelArray{labels.ParseLabel("kafka")}
labelsICMP := labels.LabelArray{labels.ParseLabel("icmp")}
labelsICMPv6 := labels.LabelArray{labels.ParseLabel("icmpv6")}
labelsHTTP := labels.LabelArray{labels.ParseLabel("http")}
labelsL7 := labels.LabelArray{labels.ParseLabel("l7")}
l3Rule := api.Rule{
EndpointSelector: selFoo,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{selBar1},
},
},
},
Labels: labelsL3,
}
l3Rule.Sanitize()
_, _, err := repo.mustAdd(l3Rule)
require.NoError(t, err)
kafkaRule := api.Rule{
EndpointSelector: selFoo,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{selBar2},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "9092", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{APIKey: "produce"},
},
},
}},
},
},
Labels: labelsKafka,
}
kafkaRule.Sanitize()
_, _, err = repo.mustAdd(kafkaRule)
require.NoError(t, err)
httpRule := api.Rule{
EndpointSelector: selFoo,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{selBar2},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
Labels: labelsHTTP,
}
_, _, err = repo.mustAdd(httpRule)
require.NoError(t, err)
l7Rule := api.Rule{
EndpointSelector: selFoo,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{selBar2},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "9090", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
L7Proto: "tester",
L7: []api.PortRuleL7{map[string]string{"method": "GET", "path": "/"}},
},
}},
},
},
Labels: labelsL7,
}
_, _, err = repo.mustAdd(l7Rule)
require.NoError(t, err)
icmpV4Type := intstr.FromInt(8)
icmpRule := api.Rule{
EndpointSelector: selFoo,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{selBar2},
},
ICMPs: api.ICMPRules{{
Fields: []api.ICMPField{{
Type: &icmpV4Type,
}},
}},
},
},
Labels: labelsICMP,
}
_, _, err = repo.mustAdd(icmpRule)
require.NoError(t, err)
icmpV6Type := intstr.FromInt(128)
icmpV6Rule := api.Rule{
EndpointSelector: selFoo,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{selBar2},
},
ICMPs: api.ICMPRules{{
Fields: []api.ICMPField{{
Type: &icmpV6Type,
Family: api.IPv6Family,
}},
}},
},
},
Labels: labelsICMPv6,
}
_, _, err = repo.mustAdd(icmpV6Rule)
require.NoError(t, err)
ctx := &SearchContext{
To: labels.ParseSelectLabelArray("id=foo"),
}
repo.mutex.RLock()
defer repo.mutex.RUnlock()
policy, err := repo.ResolveL4IngressPolicy(ctx)
require.NoError(t, err)
expectedPolicy := NewL4PolicyMapWithValues(map[string]*L4Filter{
"0/ANY": {
Port: 0,
Protocol: api.ProtoAny,
U8Proto: 0x0,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorBar1: nil,
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar1: {labelsL3}}),
},
"8/ICMP": {
Port: 8,
Protocol: api.ProtoICMP,
U8Proto: 0x1,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorBar2: nil,
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsICMP}}),
},
"128/ICMPV6": {
Port: 128,
Protocol: api.ProtoICMPv6,
U8Proto: 0x3A,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorBar2: nil,
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsICMPv6}}),
},
"9092/TCP": {
Port: 9092,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
L7Parser: ParserTypeKafka,
Ingress: true,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorBar2: &PerSelectorPolicy{
L7Rules: api.L7Rules{
Kafka: []kafka.PortRule{kafkaRule.Ingress[0].ToPorts[0].Rules.Kafka[0]},
},
isRedirect: true,
},
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsKafka}}),
},
"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
L7Parser: ParserTypeHTTP,
Ingress: true,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorBar2: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{httpRule.Ingress[0].ToPorts[0].Rules.HTTP[0]},
},
isRedirect: true,
},
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsHTTP}}),
},
"9090/TCP": {
Port: 9090,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
L7Parser: L7ParserType("tester"),
Ingress: true,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorBar2: &PerSelectorPolicy{
L7Rules: api.L7Rules{
L7Proto: "tester",
L7: []api.PortRuleL7{l7Rule.Ingress[0].ToPorts[0].Rules.L7[0]},
},
isRedirect: true,
},
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsL7}}),
},
})
require.True(t, policy.TestingOnlyEquals(expectedPolicy), policy.TestingOnlyDiff(expectedPolicy))
policy.Detach(repo.GetSelectorCache())
}
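// TestWildcardL4RulesIngress verifies that port-only (L4) ingress rules and L7
// rules on the same ports, coming from different peer selectors, are merged
// into a single L4Filter per port.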
func TestWildcardL4RulesIngress(t *testing.T) {
td := newTestData()
repo := td.repo
selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
selBar1 := api.NewESFromLabels(labels.ParseSelectLabel("id=bar1"))
selBar2 := api.NewESFromLabels(labels.ParseSelectLabel("id=bar2"))
labelsL4Kafka := labels.LabelArray{labels.ParseLabel("L4-kafka")}
labelsL7Kafka := labels.LabelArray{labels.ParseLabel("kafka")}
labelsL4HTTP := labels.LabelArray{labels.ParseLabel("L4-http")}
labelsL7HTTP := labels.LabelArray{labels.ParseLabel("http")}
l49092Rule := api.Rule{
EndpointSelector: selFoo,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{selBar1},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "9092", Protocol: api.ProtoTCP},
},
}},
},
},
Labels: labelsL4Kafka,
}
l49092Rule.Sanitize()
_, _, err := repo.mustAdd(l49092Rule)
require.NoError(t, err)
kafkaRule := api.Rule{
EndpointSelector: selFoo,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{selBar2},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "9092", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{APIKey: "produce"},
},
},
}},
},
},
Labels: labelsL7Kafka,
}
kafkaRule.Sanitize()
_, _, err = repo.mustAdd(kafkaRule)
require.NoError(t, err)
l480Rule := api.Rule{
EndpointSelector: selFoo,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{selBar1},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
Labels: labelsL4HTTP,
}
l480Rule.Sanitize()
_, _, err = repo.mustAdd(l480Rule)
require.NoError(t, err)
httpRule := api.Rule{
EndpointSelector: selFoo,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{selBar2},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
Labels: labelsL7HTTP,
}
_, _, err = repo.mustAdd(httpRule)
require.NoError(t, err)
ctx := &SearchContext{
To: labels.ParseSelectLabelArray("id=foo"),
}
repo.mutex.RLock()
defer repo.mutex.RUnlock()
policy, err := repo.ResolveL4IngressPolicy(ctx)
require.NoError(t, err)
expectedPolicy := NewL4PolicyMapWithValues(map[string]*L4Filter{
"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
L7Parser: ParserTypeHTTP,
Ingress: true,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorBar1: nil,
td.cachedSelectorBar2: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{httpRule.Ingress[0].ToPorts[0].Rules.HTTP[0]},
},
isRedirect: true,
},
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorBar1: {labelsL4HTTP},
td.cachedSelectorBar2: {labelsL7HTTP},
}),
},
"9092/TCP": {
Port: 9092,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
L7Parser: ParserTypeKafka,
Ingress: true,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorBar1: nil,
td.cachedSelectorBar2: &PerSelectorPolicy{
L7Rules: api.L7Rules{
Kafka: []kafka.PortRule{kafkaRule.Ingress[0].ToPorts[0].Rules.Kafka[0]},
},
isRedirect: true,
},
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorBar1: {labelsL4Kafka},
td.cachedSelectorBar2: {labelsL7Kafka},
}),
},
})
require.True(t, policy.TestingOnlyEquals(expectedPolicy), policy.TestingOnlyDiff(expectedPolicy))
policy.Detach(repo.GetSelectorCache())
}
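// TestL3DependentL4IngressFromRequires checks that FromRequires requirements
// are merged into the FromEndpoints selector of the resulting port-80 ingress
// filter.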
func TestL3DependentL4IngressFromRequires(t *testing.T) {
td := newTestData()
repo := td.repo
selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
selBar1 := api.NewESFromLabels(labels.ParseSelectLabel("id=bar1"))
selBar2 := api.NewESFromLabels(labels.ParseSelectLabel("id=bar2"))
l480Rule := api.Rule{
EndpointSelector: selFoo,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{
selBar1,
},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromRequires: []api.EndpointSelector{selBar2},
},
},
},
}
l480Rule.Sanitize()
_, _, err := repo.mustAdd(l480Rule)
require.NoError(t, err)
ctx := &SearchContext{
To: labels.ParseSelectLabelArray("id=foo"),
}
repo.mutex.RLock()
defer repo.mutex.RUnlock()
policy, err := repo.ResolveL4IngressPolicy(ctx)
require.NoError(t, err)
expectedSelector := api.NewESFromMatchRequirements(map[string]string{"any.id": "bar1"}, []slim_metav1.LabelSelectorRequirement{
{
Key: "any.id",
Operator: slim_metav1.LabelSelectorOpIn,
Values: []string{"bar2"},
},
})
expectedCachedSelector, _ := td.sc.AddIdentitySelector(dummySelectorCacheUser, EmptyStringLabels, expectedSelector)
expectedPolicy := NewL4PolicyMapWithValues(map[string]*L4Filter{
"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
PerSelectorPolicies: L7DataMap{
expectedCachedSelector: nil,
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
expectedCachedSelector: {nil},
}),
},
})
require.Equal(t, expectedPolicy, policy)
policy.Detach(repo.GetSelectorCache())
}
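// TestL3DependentL4EgressFromRequires checks the egress counterpart: the
// ToRequires requirements are merged into both the wildcard "0/ANY" selector
// and the port-80 selector.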
func TestL3DependentL4EgressFromRequires(t *testing.T) {
td := newTestData()
repo := td.repo
selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
selBar1 := api.NewESFromLabels(labels.ParseSelectLabel("id=bar1"))
selBar2 := api.NewESFromLabels(labels.ParseSelectLabel("id=bar2"))
l480Rule := api.Rule{
EndpointSelector: selFoo,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{
selBar1,
},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{
api.WildcardEndpointSelector,
},
ToRequires: []api.EndpointSelector{selBar2},
},
},
},
}
l480Rule.Sanitize()
_, _, err := repo.mustAdd(l480Rule)
require.NoError(t, err)
ctx := &SearchContext{
From: labels.ParseSelectLabelArray("id=foo"),
}
repo.mutex.RLock()
defer repo.mutex.RUnlock()
logBuffer := new(bytes.Buffer)
policy, err := repo.ResolveL4EgressPolicy(ctx.WithLogger(logBuffer))
require.NoError(t, err)
expectedSelector := api.NewESFromMatchRequirements(map[string]string{"any.id": "bar1"}, []slim_metav1.LabelSelectorRequirement{
{
Key: "any.id",
Operator: slim_metav1.LabelSelectorOpIn,
Values: []string{"bar2"},
},
})
expectedSelector2 := api.NewESFromMatchRequirements(map[string]string{}, []slim_metav1.LabelSelectorRequirement{
{
Key: "any.id",
Operator: slim_metav1.LabelSelectorOpIn,
Values: []string{"bar2"},
},
})
expectedCachedSelector, _ := td.sc.AddIdentitySelector(dummySelectorCacheUser, EmptyStringLabels, expectedSelector)
expectedCachedSelector2, _ := td.sc.AddIdentitySelector(dummySelectorCacheUser, EmptyStringLabels, expectedSelector2)
expectedPolicy := NewL4PolicyMapWithValues(map[string]*L4Filter{
"0/ANY": {
Port: 0,
Protocol: "ANY",
U8Proto: 0x0,
PerSelectorPolicies: L7DataMap{
expectedCachedSelector2: nil,
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
expectedCachedSelector2: {nil},
}),
},
"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
PerSelectorPolicies: L7DataMap{
expectedCachedSelector: nil,
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
expectedCachedSelector: {nil},
}),
},
})
if !assert.True(t, policy.TestingOnlyEquals(expectedPolicy), policy.TestingOnlyDiff(expectedPolicy)) {
t.Errorf("Policy doesn't match expected:\n%s", logBuffer.String())
}
policy.Detach(repo.GetSelectorCache())
}
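// TestWildcardL3RulesEgress verifies that an L3-only egress allow for one peer
// coexists with DNS, HTTP and ICMP egress rules for another peer.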
func TestWildcardL3RulesEgress(t *testing.T) {
td := newTestData()
repo := td.repo
selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
selBar1 := api.NewESFromLabels(labels.ParseSelectLabel("id=bar1"))
selBar2 := api.NewESFromLabels(labels.ParseSelectLabel("id=bar2"))
labelsL4 := labels.LabelArray{labels.ParseLabel("L4")}
labelsDNS := labels.LabelArray{labels.ParseLabel("dns")}
labelsHTTP := labels.LabelArray{labels.ParseLabel("http")}
labelsICMP := labels.LabelArray{labels.ParseLabel("icmp")}
labelsICMPv6 := labels.LabelArray{labels.ParseLabel("icmpv6")}
l3Rule := api.Rule{
EndpointSelector: selFoo,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{selBar1},
},
},
},
Labels: labelsL4,
}
l3Rule.Sanitize()
_, _, err := repo.mustAdd(l3Rule)
require.NoError(t, err)
dnsRule := api.Rule{
EndpointSelector: selFoo,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{selBar2},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "53", Protocol: api.ProtoUDP},
},
Rules: &api.L7Rules{
DNS: []api.PortRuleDNS{
{MatchName: "empire.gov"},
},
},
}},
},
},
Labels: labelsDNS,
}
dnsRule.Sanitize()
_, _, err = repo.mustAdd(dnsRule)
require.NoError(t, err)
httpRule := api.Rule{
EndpointSelector: selFoo,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{selBar2},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
Labels: labelsHTTP,
}
_, _, err = repo.mustAdd(httpRule)
require.NoError(t, err)
icmpV4Type := intstr.FromInt(8)
icmpRule := api.Rule{
EndpointSelector: selFoo,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{selBar2},
},
ICMPs: api.ICMPRules{{
Fields: []api.ICMPField{{
Type: &icmpV4Type,
}},
}},
},
},
Labels: labelsICMP,
}
_, _, err = repo.mustAdd(icmpRule)
require.NoError(t, err)
icmpV6Type := intstr.FromInt(128)
icmpV6Rule := api.Rule{
EndpointSelector: selFoo,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{selBar2},
},
ICMPs: api.ICMPRules{{
Fields: []api.ICMPField{{
Type: &icmpV6Type,
Family: "IPv6",
}},
}},
},
},
Labels: labelsICMPv6,
}
_, _, err = repo.mustAdd(icmpV6Rule)
require.NoError(t, err)
ctx := &SearchContext{
From: labels.ParseSelectLabelArray("id=foo"),
}
repo.mutex.RLock()
defer repo.mutex.RUnlock()
logBuffer := new(bytes.Buffer)
policy, err := repo.ResolveL4EgressPolicy(ctx.WithLogger(logBuffer))
require.NoError(t, err)
// Traffic to bar1 should not be forwarded to the DNS or HTTP
// proxy at all, but if it is (e.g., for visibility), the
// "0/ANY" rule should allow such traffic through.
expectedPolicy := NewL4PolicyMapWithValues(map[string]*L4Filter{
"53/UDP": {
Port: 53,
Protocol: api.ProtoUDP,
U8Proto: 0x11,
L7Parser: ParserTypeDNS,
Ingress: false,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorBar2: &PerSelectorPolicy{
L7Rules: api.L7Rules{
DNS: []api.PortRuleDNS{dnsRule.Egress[0].ToPorts[0].Rules.DNS[0]},
},
isRedirect: true,
},
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsDNS}}),
},
"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
L7Parser: ParserTypeHTTP,
Ingress: false,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorBar2: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{httpRule.Egress[0].ToPorts[0].Rules.HTTP[0]},
},
isRedirect: true,
},
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsHTTP}}),
},
"8/ICMP": {
Port: 8,
Protocol: api.ProtoICMP,
U8Proto: 0x1,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorBar2: nil,
},
Ingress: false,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsICMP}}),
},
"128/ICMPV6": {
Port: 128,
Protocol: api.ProtoICMPv6,
U8Proto: 0x3A,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorBar2: nil,
},
Ingress: false,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsICMPv6}}),
},
"0/ANY": {
Port: 0,
Protocol: "ANY",
U8Proto: 0x0,
L7Parser: "",
PerSelectorPolicies: L7DataMap{
td.cachedSelectorBar1: nil,
},
Ingress: false,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar1: {labelsL4}}),
},
})
if !assert.True(t, policy.TestingOnlyEquals(expectedPolicy), policy.TestingOnlyDiff(expectedPolicy)) {
t.Logf("%s", logBuffer.String())
t.Errorf("Resolved policy did not match expected: \n%s", err)
}
policy.Detach(repo.GetSelectorCache())
}
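// TestWildcardL4RulesEgress verifies that port-only egress rules and L7 egress
// rules on the same ports are merged per peer selector into a single L4Filter.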
func TestWildcardL4RulesEgress(t *testing.T) {
td := newTestData()
repo := td.repo
selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
selBar1 := api.NewESFromLabels(labels.ParseSelectLabel("id=bar1"))
selBar2 := api.NewESFromLabels(labels.ParseSelectLabel("id=bar2"))
labelsL3DNS := labels.LabelArray{labels.ParseLabel("L3-dns")}
labelsL7DNS := labels.LabelArray{labels.ParseLabel("dns")}
labelsL3HTTP := labels.LabelArray{labels.ParseLabel("L3-http")}
labelsL7HTTP := labels.LabelArray{labels.ParseLabel("http")}
l453Rule := api.Rule{
EndpointSelector: selFoo,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{selBar1},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "53", Protocol: api.ProtoUDP},
},
}},
},
},
Labels: labelsL3DNS,
}
l453Rule.Sanitize()
_, _, err := repo.mustAdd(l453Rule)
require.NoError(t, err)
dnsRule := api.Rule{
EndpointSelector: selFoo,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{selBar2},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "53", Protocol: api.ProtoUDP},
},
Rules: &api.L7Rules{
DNS: []api.PortRuleDNS{
{MatchName: "empire.gov"},
},
},
}},
},
},
Labels: labelsL7DNS,
}
dnsRule.Sanitize()
_, _, err = repo.mustAdd(dnsRule)
require.NoError(t, err)
l480Rule := api.Rule{
EndpointSelector: selFoo,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{selBar1},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
Labels: labelsL3HTTP,
}
l480Rule.Sanitize()
_, _, err = repo.mustAdd(l480Rule)
require.NoError(t, err)
httpRule := api.Rule{
EndpointSelector: selFoo,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{selBar2},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
Labels: labelsL7HTTP,
}
_, _, err = repo.mustAdd(httpRule)
require.NoError(t, err)
ctx := &SearchContext{
From: labels.ParseSelectLabelArray("id=foo"),
}
repo.mutex.RLock()
defer repo.mutex.RUnlock()
logBuffer := new(bytes.Buffer)
policy, err := repo.ResolveL4EgressPolicy(ctx.WithLogger(logBuffer))
require.NoError(t, err)
// Bar1 should not be forwarded to the proxy, but if it is (e.g., for visibility),
// the L3/L4 allow should pass it without an explicit L7 wildcard.
expectedPolicy := NewL4PolicyMapWithValues(map[string]*L4Filter{
"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
L7Parser: ParserTypeHTTP,
Ingress: false,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorBar1: nil,
td.cachedSelectorBar2: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{httpRule.Egress[0].ToPorts[0].Rules.HTTP[0]},
},
isRedirect: true,
},
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorBar1: {labelsL3HTTP},
td.cachedSelectorBar2: {labelsL7HTTP},
}),
},
"53/UDP": {
Port: 53,
Protocol: api.ProtoUDP,
U8Proto: 0x11,
L7Parser: ParserTypeDNS,
Ingress: false,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorBar1: nil,
td.cachedSelectorBar2: &PerSelectorPolicy{
L7Rules: api.L7Rules{
DNS: []api.PortRuleDNS{dnsRule.Egress[0].ToPorts[0].Rules.DNS[0]},
},
isRedirect: true,
},
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorBar1: {labelsL3DNS},
td.cachedSelectorBar2: {labelsL7DNS},
}),
},
})
if !assert.True(t, policy.TestingOnlyEquals(expectedPolicy), policy.TestingOnlyDiff(expectedPolicy)) {
t.Logf("%s", logBuffer.String())
t.Error("Resolved policy did not match expected")
}
policy.Detach(repo.GetSelectorCache())
}
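// TestWildcardCIDRRulesEgress verifies CIDR-based egress: an HTTP rule and an
// L3-only rule to the same CIDR resolve to separate "80/TCP" and "0/ANY"
// filters keyed by the cached CIDR selector.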
func TestWildcardCIDRRulesEgress(t *testing.T) {
td := newTestData()
repo := td.repo
labelsL3 := labels.LabelArray{labels.ParseLabel("L3")}
labelsHTTP := labels.LabelArray{labels.ParseLabel("http")}
cidrSlice := api.CIDRSlice{"192.0.0.0/3"}
cidrSelectors := cidrSlice.GetAsEndpointSelectors()
var cachedSelectors CachedSelectorSlice
for i := range cidrSelectors {
c, _ := td.sc.AddIdentitySelector(dummySelectorCacheUser, EmptyStringLabels, cidrSelectors[i])
cachedSelectors = append(cachedSelectors, c)
defer td.sc.RemoveSelector(c, dummySelectorCacheUser)
}
selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
l480Get := api.Rule{
EndpointSelector: selFoo,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToCIDR: api.CIDRSlice{"192.0.0.0/3"},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{
Port: "80",
Protocol: api.ProtoTCP,
},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{
Headers: []string{"X-My-Header: true"},
Method: "GET",
Path: "/",
},
},
},
}},
},
},
Labels: labelsHTTP,
}
l480Get.Sanitize()
_, _, err := repo.mustAdd(l480Get)
require.NoError(t, err)
l3Rule := api.Rule{
EndpointSelector: selFoo,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToCIDR: api.CIDRSlice{"192.0.0.0/3"},
},
},
},
Labels: labelsL3,
}
l3Rule.Sanitize()
_, _, err = repo.mustAdd(l3Rule)
require.NoError(t, err)
ctx := &SearchContext{
From: labels.ParseSelectLabelArray("id=foo"),
}
repo.mutex.RLock()
defer repo.mutex.RUnlock()
logBuffer := new(bytes.Buffer)
policy, err := repo.ResolveL4EgressPolicy(ctx.WithLogger(logBuffer))
require.NoError(t, err)
// Port 80 policy does not need the wildcard, as the "0" port policy will allow the traffic.
// HTTP rules can have side-effects, so they need to be retained even if shadowed by a wildcard.
expectedPolicy := NewL4PolicyMapWithValues(map[string]*L4Filter{
"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
L7Parser: ParserTypeHTTP,
Ingress: false,
PerSelectorPolicies: L7DataMap{
cachedSelectors[0]: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{
Headers: []string{"X-My-Header: true"},
Method: "GET",
Path: "/",
}},
},
isRedirect: true,
},
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{cachedSelectors[0]: {labelsHTTP}}),
},
"0/ANY": {
Port: 0,
Protocol: api.ProtoAny,
U8Proto: 0x0,
L7Parser: ParserTypeNone,
Ingress: false,
PerSelectorPolicies: L7DataMap{
cachedSelectors[0]: nil,
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{cachedSelectors[0]: {labelsL3}}),
},
})
if !assert.True(t, policy.TestingOnlyEquals(expectedPolicy), policy.TestingOnlyDiff(expectedPolicy)) {
t.Logf("%s", logBuffer.String())
t.Error("Resolved policy did not match expected")
}
policy.Detach(repo.GetSelectorCache())
}
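// TestWildcardL3RulesIngressFromEntities verifies that a FromEntities "world"
// ingress rule expands to the world, world-ipv4 and world-ipv6 selectors,
// alongside per-port Kafka and HTTP rules for a regular peer selector.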
func TestWildcardL3RulesIngressFromEntities(t *testing.T) {
td := newTestData()
repo := td.repo
selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
selBar2 := api.NewESFromLabels(labels.ParseSelectLabel("id=bar2"))
labelsL3 := labels.LabelArray{labels.ParseLabel("L3")}
labelsKafka := labels.LabelArray{labels.ParseLabel("kafka")}
labelsHTTP := labels.LabelArray{labels.ParseLabel("http")}
l3Rule := api.Rule{
EndpointSelector: selFoo,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEntities: api.EntitySlice{api.EntityWorld},
},
},
},
Labels: labelsL3,
}
l3Rule.Sanitize()
_, _, err := repo.mustAdd(l3Rule)
require.NoError(t, err)
kafkaRule := api.Rule{
EndpointSelector: selFoo,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{selBar2},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "9092", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{APIKey: "produce"},
},
},
}},
},
},
Labels: labelsKafka,
}
kafkaRule.Sanitize()
_, _, err = repo.mustAdd(kafkaRule)
require.NoError(t, err)
httpRule := api.Rule{
EndpointSelector: selFoo,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{selBar2},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
Labels: labelsHTTP,
}
_, _, err = repo.mustAdd(httpRule)
require.NoError(t, err)
ctx := &SearchContext{
To: labels.ParseSelectLabelArray("id=foo"),
}
repo.mutex.RLock()
defer repo.mutex.RUnlock()
policy, err := repo.ResolveL4IngressPolicy(ctx)
require.NoError(t, err)
require.Equal(t, 3, policy.Len())
selWorld := api.EntitySelectorMapping[api.EntityWorld][0]
require.Len(t, policy.ExactLookup("80", 0, "TCP").PerSelectorPolicies, 1)
cachedSelectorWorld := td.sc.FindCachedIdentitySelector(selWorld)
require.NotNil(t, cachedSelectorWorld)
cachedSelectorWorldV4 := td.sc.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameWorldIPv4])
require.NotNil(t, cachedSelectorWorldV4)
cachedSelectorWorldV6 := td.sc.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameWorldIPv6])
require.NotNil(t, cachedSelectorWorldV6)
expectedPolicy := NewL4PolicyMapWithValues(map[string]*L4Filter{
"0/ANY": {
Port: 0,
Protocol: "ANY",
U8Proto: 0x0,
L7Parser: "",
PerSelectorPolicies: L7DataMap{
cachedSelectorWorld: nil,
cachedSelectorWorldV4: nil,
cachedSelectorWorldV6: nil,
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
cachedSelectorWorld: {labelsL3},
cachedSelectorWorldV4: {labelsL3},
cachedSelectorWorldV6: {labelsL3},
}),
},
"9092/TCP": {
Port: 9092,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
L7Parser: ParserTypeKafka,
Ingress: true,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorBar2: &PerSelectorPolicy{
L7Rules: api.L7Rules{
Kafka: []kafka.PortRule{kafkaRule.Ingress[0].ToPorts[0].Rules.Kafka[0]},
},
isRedirect: true,
},
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsKafka}}),
},
"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
L7Parser: ParserTypeHTTP,
Ingress: true,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorBar2: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{httpRule.Ingress[0].ToPorts[0].Rules.HTTP[0]},
},
isRedirect: true,
},
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsHTTP}}),
},
})
require.True(t, policy.TestingOnlyEquals(expectedPolicy), policy.TestingOnlyDiff(expectedPolicy))
policy.Detach(repo.GetSelectorCache())
}
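// TestWildcardL3RulesEgressToEntities is the egress counterpart of the
// FromEntities test: ToEntities "world" expands to the three world selectors,
// alongside DNS and HTTP egress rules for a regular peer selector.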
func TestWildcardL3RulesEgressToEntities(t *testing.T) {
td := newTestData()
repo := td.repo
selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
selBar2 := api.NewESFromLabels(labels.ParseSelectLabel("id=bar2"))
labelsL3 := labels.LabelArray{labels.ParseLabel("L3")}
labelsDNS := labels.LabelArray{labels.ParseLabel("dns")}
labelsHTTP := labels.LabelArray{labels.ParseLabel("http")}
l3Rule := api.Rule{
EndpointSelector: selFoo,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEntities: api.EntitySlice{api.EntityWorld},
},
},
},
Labels: labelsL3,
}
l3Rule.Sanitize()
_, _, err := repo.mustAdd(l3Rule)
require.NoError(t, err)
dnsRule := api.Rule{
EndpointSelector: selFoo,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{selBar2},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "53", Protocol: api.ProtoUDP},
},
Rules: &api.L7Rules{
DNS: []api.PortRuleDNS{
{MatchName: "empire.gov"},
},
},
}},
},
},
Labels: labelsDNS,
}
dnsRule.Sanitize()
_, _, err = repo.mustAdd(dnsRule)
require.NoError(t, err)
httpRule := api.Rule{
EndpointSelector: selFoo,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{selBar2},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
Labels: labelsHTTP,
}
_, _, err = repo.mustAdd(httpRule)
require.NoError(t, err)
ctx := &SearchContext{
From: labels.ParseSelectLabelArray("id=foo"),
}
repo.mutex.RLock()
defer repo.mutex.RUnlock()
policy, err := repo.ResolveL4EgressPolicy(ctx)
require.NoError(t, err)
require.Equal(t, 3, policy.Len())
selWorld := api.EntitySelectorMapping[api.EntityWorld][0]
require.Len(t, policy.ExactLookup("80", 0, "TCP").PerSelectorPolicies, 1)
cachedSelectorWorld := td.sc.FindCachedIdentitySelector(selWorld)
require.NotNil(t, cachedSelectorWorld)
cachedSelectorWorldV4 := td.sc.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameWorldIPv4])
require.NotNil(t, cachedSelectorWorldV4)
cachedSelectorWorldV6 := td.sc.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameWorldIPv6])
require.NotNil(t, cachedSelectorWorldV6)
expectedPolicy := NewL4PolicyMapWithValues(map[string]*L4Filter{
"0/ANY": {
Port: 0,
Protocol: "ANY",
U8Proto: 0x0,
L7Parser: "",
PerSelectorPolicies: L7DataMap{
cachedSelectorWorld: nil,
cachedSelectorWorldV4: nil,
cachedSelectorWorldV6: nil,
},
Ingress: false,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
cachedSelectorWorld: {labelsL3},
cachedSelectorWorldV4: {labelsL3},
cachedSelectorWorldV6: {labelsL3},
}),
},
"53/UDP": {
Port: 53,
Protocol: api.ProtoUDP,
U8Proto: 0x11,
L7Parser: ParserTypeDNS,
Ingress: false,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorBar2: &PerSelectorPolicy{
L7Rules: api.L7Rules{
DNS: []api.PortRuleDNS{dnsRule.Egress[0].ToPorts[0].Rules.DNS[0]},
},
isRedirect: true,
},
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsDNS}}),
},
"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
L7Parser: ParserTypeHTTP,
Ingress: false,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorBar2: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{httpRule.Egress[0].ToPorts[0].Rules.HTTP[0]},
},
isRedirect: true,
},
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorBar2: {labelsHTTP}}),
},
})
require.True(t, policy.TestingOnlyEquals(expectedPolicy), policy.TestingOnlyDiff(expectedPolicy))
policy.Detach(repo.GetSelectorCache())
}
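// TestMinikubeGettingStarted models the policy from the getting-started guide:
// app2 may reach app1 on port 80 (HTTP GET /), while app3 matches no rules.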
func TestMinikubeGettingStarted(t *testing.T) {
td := newTestData()
repo := td.repo
app2Selector := labels.ParseSelectLabelArray("id=app2")
fromApp2 := &SearchContext{
From: app2Selector,
To: labels.ParseSelectLabelArray("id=app1"),
Trace: TRACE_VERBOSE,
}
fromApp3 := &SearchContext{
From: labels.ParseSelectLabelArray("id=app3"),
To: labels.ParseSelectLabelArray("id=app1"),
}
repo.mutex.RLock()
// no rules loaded: Allows() => denied
require.Equal(t, api.Denied, repo.AllowsIngressRLocked(fromApp2))
require.Equal(t, api.Denied, repo.AllowsIngressRLocked(fromApp3))
repo.mutex.RUnlock()
selFromApp2 := api.NewESFromLabels(
labels.ParseSelectLabel("id=app2"),
)
selectorFromApp2 := []api.EndpointSelector{
selFromApp2,
}
_, _, err := repo.mustAdd(api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("id=app1")),
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: selectorFromApp2,
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
})
require.NoError(t, err)
_, _, err = repo.mustAdd(api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("id=app1")),
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: selectorFromApp2,
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
})
require.NoError(t, err)
_, _, err = repo.mustAdd(api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("id=app1")),
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: selectorFromApp2,
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
})
require.NoError(t, err)
repo.mutex.RLock()
defer repo.mutex.RUnlock()
// L4 from app2 is restricted
logBuffer := new(bytes.Buffer)
l4IngressPolicy, err := repo.ResolveL4IngressPolicy(fromApp2.WithLogger(logBuffer))
require.NoError(t, err)
cachedSelectorApp2 := td.sc.FindCachedIdentitySelector(selFromApp2)
require.NotNil(t, cachedSelectorApp2)
expected := NewL4Policy(repo.GetRevision())
expected.Ingress.PortRules.Upsert("80", 0, "TCP", &L4Filter{
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
cachedSelectorApp2: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Method: "GET", Path: "/"}, {}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{cachedSelectorApp2: {nil}}),
})
if !assert.EqualValues(t, expected.Ingress.PortRules, l4IngressPolicy) {
t.Logf("%s", logBuffer.String())
t.Errorf("Resolved policy did not match expected")
}
l4IngressPolicy.Detach(td.sc)
expected.Detach(td.sc)
// L4 from app3 has no rules
expected = NewL4Policy(repo.GetRevision())
l4IngressPolicy, err = repo.ResolveL4IngressPolicy(fromApp3)
require.NoError(t, err)
require.Equal(t, 0, l4IngressPolicy.Len())
require.Equal(t, expected.Ingress.PortRules, l4IngressPolicy)
l4IngressPolicy.Detach(td.sc)
expected.Detach(td.sc)
}
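// buildSearchCtx returns a SearchContext for the given "from" and "to" select
// labels and destination port, with tracing enabled.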
func buildSearchCtx(from, to string, port uint16) *SearchContext {
ports := []*models.Port{{Port: port, Protocol: string(api.ProtoAny)}}
return &SearchContext{
From: labels.ParseSelectLabelArray(from),
To: labels.ParseSelectLabelArray(to),
DPorts: ports,
Trace: TRACE_ENABLED,
}
}
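// buildRule returns an ingress rule allowing traffic from the host and from
// the given "from" selector to the "to" selector, optionally limited to the
// given port.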
func buildRule(from, to, port string) api.Rule {
reservedES := api.NewESFromLabels(labels.ParseSelectLabel("reserved:host"))
fromES := api.NewESFromLabels(labels.ParseSelectLabel(from))
toES := api.NewESFromLabels(labels.ParseSelectLabel(to))
ports := []api.PortRule{}
if port != "" {
ports = []api.PortRule{
{Ports: []api.PortProtocol{{Port: port}}},
}
}
return api.Rule{
EndpointSelector: toES,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{
reservedES,
fromES,
},
},
ToPorts: ports,
},
},
}
}
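// checkTrace resolves ingress policy for ctx and asserts both the exact trace
// output and the resulting verdict.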
func (repo *Repository) checkTrace(t *testing.T, ctx *SearchContext, trace string,
expectedVerdict api.Decision) {
buffer := new(bytes.Buffer)
ctx.Logging = stdlog.New(buffer, "", 0)
repo.mutex.RLock()
verdict := repo.AllowsIngressRLocked(ctx)
repo.mutex.RUnlock()
expectedOut := "Tracing " + ctx.String() + "\n" + trace
require.EqualValues(t, expectedOut, buffer.String())
require.Equal(t, expectedVerdict, verdict)
}
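// TestPolicyTrace validates the human-readable trace output produced while
// resolving ingress policy, including L4 and FromRequires interactions.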
func TestPolicyTrace(t *testing.T) {
td := newTestData()
repo := td.repo
// Add rules to allow foo=>bar
l3rule := buildRule("foo", "bar", "")
rules := api.Rules{&l3rule}
_, _ = repo.MustAddList(rules)
// foo=>bar is OK
expectedOut := `
Resolving ingress policy for [any:bar]
* Rule {"matchLabels":{"any:bar":""}}: selected
Allows from labels {"matchLabels":{"reserved:host":""}}
Allows from labels {"matchLabels":{"any:foo":""}}
Found all required labels
1/1 rules selected
Found allow rule
Found no deny rule
Ingress verdict: allowed
`
ctx := buildSearchCtx("foo", "bar", 0)
repo.checkTrace(t, ctx, expectedOut, api.Allowed)
// foo=>bar:80 is OK
ctx = buildSearchCtx("foo", "bar", 80)
repo.checkTrace(t, ctx, expectedOut, api.Allowed)
// bar=>foo is Denied
ctx = buildSearchCtx("bar", "foo", 0)
expectedOut = `
Resolving ingress policy for [any:foo]
0/1 rules selected
Found no allow rule
Found no deny rule
Ingress verdict: denied
`
repo.checkTrace(t, ctx, expectedOut, api.Denied)
// bar=>foo:80 is also Denied by the same logic
ctx = buildSearchCtx("bar", "foo", 80)
repo.checkTrace(t, ctx, expectedOut, api.Denied)
// Now, add an extra rule to specifically allow baz=>bar on port 80
l4rule := buildRule("baz", "bar", "80")
_, _, err := repo.mustAdd(l4rule)
require.NoError(t, err)
// baz=>bar:80 is OK
ctx = buildSearchCtx("baz", "bar", 80)
expectedOut = `
Resolving ingress policy for [any:bar]
* Rule {"matchLabels":{"any:bar":""}}: selected
Allows from labels {"matchLabels":{"reserved:host":""}}
Allows from labels {"matchLabels":{"any:foo":""}}
No label match for [any:baz]
* Rule {"matchLabels":{"any:bar":""}}: selected
Allows from labels {"matchLabels":{"reserved:host":""}}
Allows from labels {"matchLabels":{"any:baz":""}}
Found all required labels
Allows port [{80 0 ANY}]
2/2 rules selected
Found allow rule
Found no deny rule
Ingress verdict: allowed
`
repo.checkTrace(t, ctx, expectedOut, api.Allowed)
// bar=>bar:80 is Denied
ctx = buildSearchCtx("bar", "bar", 80)
expectedOut = `
Resolving ingress policy for [any:bar]
* Rule {"matchLabels":{"any:bar":""}}: selected
Allows from labels {"matchLabels":{"reserved:host":""}}
Allows from labels {"matchLabels":{"any:foo":""}}
No label match for [any:bar]
* Rule {"matchLabels":{"any:bar":""}}: selected
Allows from labels {"matchLabels":{"reserved:host":""}}
Allows from labels {"matchLabels":{"any:baz":""}}
No label match for [any:bar]
2/2 rules selected
Found no allow rule
Found no deny rule
Ingress verdict: denied
`
repo.checkTrace(t, ctx, expectedOut, api.Denied)
// Test that FromRequires "baz" drops "foo" traffic
l3rule = api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromRequires: []api.EndpointSelector{
api.NewESFromLabels(labels.ParseSelectLabel("baz")),
},
},
}},
}
_, _, err = repo.mustAdd(l3rule)
require.NoError(t, err)
// foo=>bar is now denied due to the FromRequires
ctx = buildSearchCtx("foo", "bar", 0)
expectedOut = `
Resolving ingress policy for [any:bar]
* Rule {"matchLabels":{"any:bar":""}}: selected
Enforcing requirements [{Key:any.baz Operator:In Values:[]}]
Allows from labels {"matchLabels":{"reserved:host":""},"matchExpressions":[{"key":"any:baz","operator":"In","values":[""]}]}
Allows from labels {"matchLabels":{"any:foo":""},"matchExpressions":[{"key":"any:baz","operator":"In","values":[""]}]}
No label match for [any:foo]
* Rule {"matchLabels":{"any:bar":""}}: selected
Enforcing requirements [{Key:any.baz Operator:In Values:[]}]
Allows from labels {"matchLabels":{"reserved:host":""},"matchExpressions":[{"key":"any:baz","operator":"In","values":[""]}]}
Allows from labels {"matchLabels":{"any:baz":""},"matchExpressions":[{"key":"any:baz","operator":"In","values":[""]}]}
No label match for [any:foo]
* Rule {"matchLabels":{"any:bar":""}}: selected
3/3 rules selected
Found no allow rule
Found no deny rule
Ingress verdict: denied
`
repo.checkTrace(t, ctx, expectedOut, api.Denied)
// baz=>bar is only denied because of the L4 policy
ctx = buildSearchCtx("baz", "bar", 0)
expectedOut = `
Resolving ingress policy for [any:bar]
* Rule {"matchLabels":{"any:bar":""}}: selected
Enforcing requirements [{Key:any.baz Operator:In Values:[]}]
Allows from labels {"matchLabels":{"reserved:host":""},"matchExpressions":[{"key":"any:baz","operator":"In","values":[""]}]}
Allows from labels {"matchLabels":{"any:foo":""},"matchExpressions":[{"key":"any:baz","operator":"In","values":[""]}]}
No label match for [any:baz]
* Rule {"matchLabels":{"any:bar":""}}: selected
Enforcing requirements [{Key:any.baz Operator:In Values:[]}]
Allows from labels {"matchLabels":{"reserved:host":""},"matchExpressions":[{"key":"any:baz","operator":"In","values":[""]}]}
Allows from labels {"matchLabels":{"any:baz":""},"matchExpressions":[{"key":"any:baz","operator":"In","values":[""]}]}
Found all required labels
Allows port [{80 0 ANY}]
No port match found
* Rule {"matchLabels":{"any:bar":""}}: selected
3/3 rules selected
Found no allow rule
Found no deny rule
Ingress verdict: denied
`
repo.checkTrace(t, ctx, expectedOut, api.Denied)
// Should still be allowed with the new FromRequires constraint
ctx = buildSearchCtx("baz", "bar", 80)
repo.mutex.RLock()
verdict := repo.AllowsIngressRLocked(ctx)
repo.mutex.RUnlock()
require.Equal(t, api.Allowed, verdict)
}
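// TestIterate checks that Repository.Iterate visits every rule and that
// mutations made by the iteration callback are reflected afterwards.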
func TestIterate(t *testing.T) {
td := newTestData()
repo := td.repo
numWithEgress := 0
countEgressRules := func(r *api.Rule) {
if len(r.Egress) > 0 {
numWithEgress++
}
}
repo.Iterate(countEgressRules)
require.Equal(t, 0, numWithEgress)
numRules := 10
lbls := make([]labels.Label, 10)
for i := 0; i < numRules; i++ {
it := fmt.Sprintf("baz%d", i)
epSelector := api.NewESFromLabels(
labels.NewLabel(
"foo",
it,
labels.LabelSourceK8s,
),
)
lbls[i] = labels.NewLabel("tag3", it, labels.LabelSourceK8s)
_, _, err := repo.mustAdd(api.Rule{
EndpointSelector: epSelector,
Labels: labels.LabelArray{lbls[i]},
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{
epSelector,
},
},
},
},
})
require.NoError(t, err)
}
numWithEgress = 0
repo.Iterate(countEgressRules)
require.Equal(t, numRules, numWithEgress)
numModified := 0
modifyRules := func(r *api.Rule) {
if r.Labels.Contains(labels.LabelArray{lbls[1]}) || r.Labels.Contains(labels.LabelArray{lbls[3]}) {
r.Egress = nil
numModified++
}
}
repo.Iterate(modifyRules)
require.Equal(t, 2, numModified)
numWithEgress = 0
repo.Iterate(countEgressRules)
require.Equal(t, numRules-numModified, numWithEgress)
_, _, numDeleted := repo.ReplaceByLabels(nil, []labels.LabelArray{{lbls[0]}})
require.Equal(t, 1, numDeleted)
numWithEgress = 0
repo.Iterate(countEgressRules)
require.Equal(t, numRules-numModified-numDeleted, numWithEgress)
}
// TestDefaultAllow covers the defaulting logic in determining an identity's default rule
// in the presence or absence of rules that do not enable default-deny mode.
func TestDefaultAllow(t *testing.T) {
// Cache the policy enforcement setting from before this test ran to avoid
// polluting other tests.
oldPolicyEnable := GetPolicyEnabled()
defer SetPolicyEnabled(oldPolicyEnable)
SetPolicyEnabled(option.DefaultEnforcement)
fooSelectLabel := labels.ParseSelectLabel("foo")
genRule := func(ingress, defaultDeny bool) api.Rule {
name := fmt.Sprintf("%v_%v", ingress, defaultDeny)
r := api.Rule{
EndpointSelector: api.NewESFromLabels(fooSelectLabel),
Labels: labels.LabelArray{labels.NewLabel(k8sConst.PolicyLabelName, name, labels.LabelSourceAny)},
}
if ingress {
r.Ingress = []api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.NewESFromLabels(fooSelectLabel)}}}}
} else {
r.Egress = []api.EgressRule{{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{api.NewESFromLabels(fooSelectLabel)}}}}
}
if ingress {
r.EnableDefaultDeny.Ingress = &defaultDeny
} else {
r.EnableDefaultDeny.Egress = &defaultDeny
}
require.NoError(t, r.Sanitize())
return r
}
iDeny := genRule(true, true) // ingress default deny
iAllow := genRule(true, false) // ingress default allow
eDeny := genRule(false, true) // egress default deny
eAllow := genRule(false, false) // egress default allow
type testCase struct {
rules []api.Rule
ingress, egress bool
ruleC int // expected number of matching rules; a default-allow rule without default-deny adds a wildcard allow rule
}
ingressCases := []testCase{
{
rules: nil, // default case, everything disabled
},
{
rules: []api.Rule{iDeny},
ingress: true,
ruleC: 1,
},
{
rules: []api.Rule{iAllow}, // Just a default-allow rule
ingress: true,
ruleC: 2, // wildcard must be added
},
{
rules: []api.Rule{iDeny, iAllow}, // default-deny takes precedence, no wildcard
ingress: true,
ruleC: 2,
},
}
egressCases := []testCase{
{
rules: nil, // default case, everything disabled
},
{
rules: []api.Rule{eDeny},
egress: true,
ruleC: 1,
},
{
rules: []api.Rule{eAllow}, // Just a default-allow rule
egress: true,
ruleC: 2, // wildcard must be added
},
{
rules: []api.Rule{eDeny, eAllow}, // default-deny takes precedence, no wildcard
egress: true,
ruleC: 2,
},
}
// Three test passes: ingress-only, egress-only, and the cartesian product of ingress and egress cases.
for i, tc := range ingressCases {
td := newTestData()
td.addIdentity(fooIdentity)
repo := td.repo
for _, rule := range tc.rules {
_, _, err := repo.mustAdd(rule)
require.NoError(t, err, "unable to add rule to policy repository")
}
ing, egr, matchingRules := repo.computePolicyEnforcementAndRules(fooIdentity)
require.Equal(t, tc.ingress, ing, "case %d: ingress should match", i)
require.Equal(t, tc.egress, egr, "case %d: egress should match", i)
require.Len(t, matchingRules, tc.ruleC, "case %d: rule count should match", i)
}
for i, tc := range egressCases {
td := newTestData()
td.addIdentity(fooIdentity)
repo := td.repo
for _, rule := range tc.rules {
_, _, err := repo.mustAdd(rule)
require.NoError(t, err, "unable to add rule to policy repository")
}
ing, egr, matchingRules := repo.computePolicyEnforcementAndRules(fooIdentity)
require.Equal(t, tc.ingress, ing, "case %d: ingress should match", i)
require.Equal(t, tc.egress, egr, "case %d: egress should match", i)
require.Len(t, matchingRules, tc.ruleC, "case %d: rule count should match", i)
}
// test all combinations of ingress + egress cases
for e, etc := range egressCases {
for i, itc := range ingressCases {
td := newTestData()
td.addIdentity(fooIdentity)
repo := td.repo
for _, rule := range etc.rules {
_, _, err := repo.mustAdd(rule)
require.NoError(t, err, "unable to add rule to policy repository")
}
for _, rule := range itc.rules {
_, _, err := repo.mustAdd(rule)
require.NoError(t, err, "unable to add rule to policy repository")
}
ing, egr, matchingRules := repo.computePolicyEnforcementAndRules(fooIdentity)
require.Equal(t, itc.ingress, ing, "case ingress %d + egress %d: ingress should match", i, e)
require.Equal(t, etc.egress, egr, "case ingress %d + egress %d: egress should match", i, e)
require.Len(t, matchingRules, itc.ruleC+etc.ruleC, "case ingress %d + egress %d: rule count should match", i, e)
}
}
}
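// TestReplaceByResource exercises ReplaceByResource: rules are keyed by their
// resource ID, replacements report the affected identities and the number of
// replaced rules, and the selector cache stays consistent throughout.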
func TestReplaceByResource(t *testing.T) {
// don't use the full newTestData() here, since we want to watch
// selectorcache changes carefully
repo := NewPolicyRepository(nil, nil, nil, nil, api.NewPolicyMetricsNoop())
sc := testNewSelectorCache(nil)
repo.selectorCache = sc
assert.Empty(t, sc.selectors)
// create 10 rules, each with a subject selector that selects one identity.
numRules := 10
rules := make(api.Rules, 0, numRules)
ids := identity.IdentityMap{}
// share the dest selector
destSelector := api.NewESFromLabels(labels.NewLabel("peer", "pod", "k8s"))
for i := range numRules {
it := fmt.Sprintf("num-%d", i)
ids[identity.NumericIdentity(i+100)] = labels.LabelArray{labels.Label{
Source: labels.LabelSourceK8s,
Key: "subject-pod",
Value: it,
}}
epSelector := api.NewESFromLabels(
labels.NewLabel(
"subject-pod",
it,
labels.LabelSourceK8s,
),
)
lbl := labels.NewLabel("policy-label", it, labels.LabelSourceK8s)
rule := &api.Rule{
EndpointSelector: epSelector,
Labels: labels.LabelArray{lbl},
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{
destSelector,
},
},
},
},
}
require.NoError(t, rule.Sanitize())
rules = append(rules, rule)
}
sc.UpdateIdentities(ids, nil, &sync.WaitGroup{})
rulesMatch := func(s ruleSlice, rs api.Rules) {
t.Helper()
ss := make(api.Rules, 0, len(s))
for _, rule := range s {
ss = append(ss, &rule.Rule)
}
assert.ElementsMatch(t, ss, rs)
}
toSlice := func(m map[ruleKey]*rule) ruleSlice {
out := ruleSlice{}
for _, v := range m {
out = append(out, v)
}
return out
}
rID1 := ipcachetypes.ResourceID("res1")
rID2 := ipcachetypes.ResourceID("res2")
affectedIDs, rev, oldRuleCnt := repo.ReplaceByResource(rules[0:1], rID1)
assert.ElementsMatch(t, []identity.NumericIdentity{100}, affectedIDs.AsSlice())
assert.EqualValues(t, 2, rev)
assert.Equal(t, 0, oldRuleCnt)
// check basic bookkeeping
assert.Len(t, repo.rules, 1)
assert.Len(t, repo.rulesByResource, 1)
assert.Len(t, repo.rulesByResource[rID1], 1)
rulesMatch(toSlice(repo.rulesByResource[rID1]), rules[0:1])
// Check that the selectorcache is sane
// It should have one selector: the subject pod for rule 0
assert.Len(t, sc.selectors, 1)
// add second resource with rules 1, 2
affectedIDs, rev, oldRuleCnt = repo.ReplaceByResource(rules[1:3], rID2)
assert.ElementsMatch(t, []identity.NumericIdentity{101, 102}, affectedIDs.AsSlice())
assert.EqualValues(t, 3, rev)
assert.Equal(t, 0, oldRuleCnt)
// check basic bookkeeping
assert.Len(t, repo.rules, 3)
assert.Len(t, repo.rulesByResource, 2)
assert.Len(t, repo.rulesByResource[rID1], 1)
assert.Len(t, repo.rulesByResource[rID2], 2)
assert.Len(t, sc.selectors, 3)
// replace rid1 with rules 3, 4.
// affected IDs should be 100, 103, 104 (for outgoing)
affectedIDs, rev, oldRuleCnt = repo.ReplaceByResource(rules[3:5], rID1)
assert.ElementsMatch(t, []identity.NumericIdentity{100, 103, 104}, affectedIDs.AsSlice())
assert.EqualValues(t, 4, rev)
assert.Equal(t, 1, oldRuleCnt)
// check basic bookkeeping
assert.Len(t, repo.rules, 4)
assert.Len(t, repo.rulesByResource, 2)
assert.Len(t, repo.rulesByResource[rID1], 2)
assert.Len(t, repo.rulesByResource[rID2], 2)
assert.Len(t, sc.selectors, 4)
rulesMatch(toSlice(repo.rulesByResource[rID1]), rules[3:5])
assert.Equal(t, repo.rules[ruleKey{
resource: rID1,
idx: 0,
}].Rule, *rules[3])
// delete rid1
affectedIDs, _, oldRuleCnt = repo.ReplaceByResource(nil, rID1)
assert.Len(t, repo.rules, 2)
assert.Len(t, repo.rulesByResource, 1)
assert.Len(t, repo.rulesByResource[rID2], 2)
assert.Len(t, sc.selectors, 2)
assert.Equal(t, 2, oldRuleCnt)
assert.ElementsMatch(t, []identity.NumericIdentity{103, 104}, affectedIDs.AsSlice())
// delete rid1 again (noop)
affectedIDs, _, oldRuleCnt = repo.ReplaceByResource(nil, rID1)
assert.Empty(t, affectedIDs.AsSlice())
assert.Len(t, repo.rules, 2)
assert.Len(t, repo.rulesByResource, 1)
assert.Len(t, repo.rulesByResource[rID2], 2)
assert.Len(t, sc.selectors, 2)
assert.Equal(t, 0, oldRuleCnt)
// delete rid2
affectedIDs, _, oldRuleCnt = repo.ReplaceByResource(nil, rID2)
assert.ElementsMatch(t, []identity.NumericIdentity{101, 102}, affectedIDs.AsSlice())
assert.Empty(t, repo.rules)
assert.Empty(t, repo.rulesByResource)
assert.Empty(t, sc.selectors)
assert.Equal(t, 2, oldRuleCnt)
}
func TestReplaceByLabels(t *testing.T) {
// don't use the full testdata() here, since we want to watch
// selectorcache changes carefully
repo := NewPolicyRepository(nil, nil, nil, nil, api.NewPolicyMetricsNoop())
sc := testNewSelectorCache(nil)
repo.selectorCache = sc
assert.Empty(t, sc.selectors)
// create 10 rules, each with a subject selector that selects one identity.
numRules := 10
rules := make(api.Rules, 0, numRules)
ids := identity.IdentityMap{}
ruleLabels := make([]labels.LabelArray, 0, numRules)
// share the dest selector
destSelector := api.NewESFromLabels(labels.NewLabel("peer", "pod", "k8s"))
for i := range numRules {
it := fmt.Sprintf("num-%d", i)
ids[identity.NumericIdentity(i+100)] = labels.LabelArray{labels.Label{
Source: labels.LabelSourceK8s,
Key: "subject-pod",
Value: it,
}}
epSelector := api.NewESFromLabels(
labels.NewLabel(
"subject-pod",
it,
labels.LabelSourceK8s,
),
)
lbl := labels.NewLabel("policy-label", it, labels.LabelSourceK8s)
rule := &api.Rule{
EndpointSelector: epSelector,
Labels: labels.LabelArray{lbl},
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{
destSelector,
},
},
},
},
}
require.NoError(t, rule.Sanitize())
rules = append(rules, rule)
ruleLabels = append(ruleLabels, rule.Labels)
}
sc.UpdateIdentities(ids, nil, &sync.WaitGroup{})
rulesMatch := func(s ruleSlice, rs api.Rules) {
t.Helper()
ss := make(api.Rules, 0, len(s))
for _, rule := range s {
ss = append(ss, &rule.Rule)
}
assert.ElementsMatch(t, ss, rs)
}
_ = rulesMatch
toSlice := func(m map[ruleKey]*rule) ruleSlice {
out := ruleSlice{}
for _, v := range m {
out = append(out, v)
}
return out
}
_ = toSlice
affectedIDs, rev, oldRuleCnt := repo.ReplaceByLabels(rules[0:1], ruleLabels[0:1])
assert.ElementsMatch(t, []identity.NumericIdentity{100}, affectedIDs.AsSlice())
assert.EqualValues(t, 2, rev)
assert.Equal(t, 0, oldRuleCnt)
// check basic bookkeeping
assert.Len(t, repo.rules, 1)
assert.Len(t, sc.selectors, 1)
// Replace rule 0 with rule 1
affectedIDs, rev, oldRuleCnt = repo.ReplaceByLabels(rules[1:2], ruleLabels[0:1])
assert.ElementsMatch(t, []identity.NumericIdentity{100, 101}, affectedIDs.AsSlice())
assert.EqualValues(t, 3, rev)
assert.Equal(t, 1, oldRuleCnt)
// check basic bookkeeping
assert.Len(t, repo.rules, 1)
assert.Len(t, sc.selectors, 1)
// Add rules 2, 3
affectedIDs, rev, oldRuleCnt = repo.ReplaceByLabels(rules[2:4], ruleLabels[2:4])
assert.ElementsMatch(t, []identity.NumericIdentity{102, 103}, affectedIDs.AsSlice())
assert.EqualValues(t, 4, rev)
assert.Equal(t, 0, oldRuleCnt)
// check basic bookkeeping
assert.Len(t, repo.rules, 3)
assert.Len(t, sc.selectors, 3)
// Delete rules 2, 3
affectedIDs, rev, oldRuleCnt = repo.ReplaceByLabels(nil, ruleLabels[2:4])
assert.ElementsMatch(t, []identity.NumericIdentity{102, 103}, affectedIDs.AsSlice())
assert.EqualValues(t, 5, rev)
assert.Equal(t, 2, oldRuleCnt)
// check basic bookkeeping
assert.Len(t, repo.rules, 1)
assert.Len(t, sc.selectors, 1)
// delete rules 2, 3 again
affectedIDs, _, oldRuleCnt = repo.ReplaceByLabels(nil, ruleLabels[2:4])
assert.ElementsMatch(t, []identity.NumericIdentity{}, affectedIDs.AsSlice())
assert.Equal(t, 0, oldRuleCnt)
// check basic bookkeeping
assert.Len(t, repo.rules, 1)
assert.Len(t, sc.selectors, 1)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"errors"
"fmt"
"iter"
"runtime"
"github.com/sirupsen/logrus"
"github.com/cilium/cilium/pkg/container/versioned"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/u8proto"
)
// SelectorPolicy represents a selectorPolicy, previously resolved from
// the policy repository and ready to be distilled against a set of identities
// to compute datapath-level policy configuration.
type SelectorPolicy interface {
// RedirectFilters returns the filters that require a redirect, so that the endpoint
// can ensure all the needed redirects exist before a new EndpointPolicy is created.
RedirectFilters() iter.Seq2[*L4Filter, *PerSelectorPolicy]
// DistillPolicy returns the policy in terms of connectivity to peer
// Identities.
DistillPolicy(owner PolicyOwner, redirects map[string]uint16) *EndpointPolicy
}
// selectorPolicy is a structure which contains the resolved policy for a
// particular Identity across all layers (L3, L4, and L7), with the policy
// still determined in terms of EndpointSelectors.
type selectorPolicy struct {
// Revision is the revision of the policy repository used to generate
// this selectorPolicy.
Revision uint64
// SelectorCache managing selectors in L4Policy
SelectorCache *SelectorCache
// L4Policy contains the computed L4 and L7 policy.
L4Policy L4Policy
// IngressPolicyEnabled specifies whether this policy contains any policy
// at ingress.
IngressPolicyEnabled bool
// EgressPolicyEnabled specifies whether this policy contains any policy
// at egress.
EgressPolicyEnabled bool
}
func (p *selectorPolicy) Attach(ctx PolicyContext) {
p.L4Policy.Attach(ctx)
}
// EndpointPolicy is a structure which contains the resolved policy across all
// layers (L3, L4, and L7), distilled against a set of identities.
type EndpointPolicy struct {
// Note that all Endpoints sharing the same identity will be
// referring to a shared selectorPolicy!
*selectorPolicy
// VersionHandle represents the version of the SelectorCache from which
// 'policyMapState' was generated.
// Changes after this version appear in 'policyMapChanges'.
// It is updated when incremental changes are applied.
VersionHandle *versioned.VersionHandle
// policyMapState contains the state of this policy as it relates to the
// datapath. In the future, this will be factored out of this object to
// decouple the policy as it relates to the datapath vs. its userspace
// representation.
// It maps each Key to the proxy port if proxy redirection is needed.
// Proxy port 0 indicates no proxy redirection.
// All fields within the Key and the proxy port must be in host byte-order.
// Must only be accessed with PolicyOwner (aka Endpoint) lock taken.
policyMapState mapState
// policyMapChanges collects pending changes to the PolicyMapState
policyMapChanges MapChanges
// PolicyOwner describes any type which consumes this EndpointPolicy object.
PolicyOwner PolicyOwner
// Redirects contains the proxy ports needed for this EndpointPolicy.
// If any redirects are missing, a new policy will be computed to rectify it, so this map
// is constant for the lifetime of this EndpointPolicy.
Redirects map[string]uint16
}
// LookupRedirectPort returns the redirect L4 proxy port for the given input parameters.
// Returns 0 if not found or the filter doesn't require a redirect.
// Returns an error if the redirect port cannot be found.
// This is called when accumulating incremental map changes; the endpoint lock must not be taken.
func (p *EndpointPolicy) LookupRedirectPort(ingress bool, protocol string, port uint16, listener string) (uint16, error) {
proxyID := ProxyID(uint16(p.PolicyOwner.GetID()), ingress, protocol, port, listener)
if proxyPort, exists := p.Redirects[proxyID]; exists {
return proxyPort, nil
}
return 0, fmt.Errorf("Proxy port for redirect %q not found", proxyID)
}
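// lookupIngressTCP80 is a hypothetical sketch (not part of Cilium) illustrating how
// LookupRedirectPort above is keyed: the proxy ID combines the owner endpoint ID,
// direction, protocol, port and listener name. The empty listener string here is an
// assumption matching redirects registered without a named listener.
func lookupIngressTCP80(p *EndpointPolicy) (uint16, error) {
	// Look up the ingress TCP/80 redirect for this endpoint, if any.
	return p.LookupRedirectPort(true, "TCP", 80, "")
}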
// Lookup finds the policy verdict applicable to the given 'key' using the same precedence logic
// between L3 and L4-only policies as the bpf datapath when both match the given 'key'.
// To be used in testing in place of the bpf datapath when full integration testing is not desired.
// Returns the closest matching covering policy entry, the labels of the rules that contributed to
// that verdict, and 'true' if found.
// Returns a deny entry when a match is not found, mirroring the datapath default deny behavior.
// 'key' must not have a wildcard identity or port.
func (p *EndpointPolicy) Lookup(key Key) (MapStateEntry, labels.LabelArrayList, bool) {
entry, found := p.policyMapState.lookup(key)
lbls := labels.LabelArrayListFromString(entry.derivedFromRules.Value())
return entry.MapStateEntry, lbls, found
}
// PolicyOwner is anything which consumes a EndpointPolicy.
type PolicyOwner interface {
GetID() uint64
GetNamedPort(ingress bool, name string, proto u8proto.U8proto) uint16
PolicyDebug(fields logrus.Fields, msg string)
IsHost() bool
MapStateSize() int
}
// newSelectorPolicy returns an empty selectorPolicy stub.
func newSelectorPolicy(selectorCache *SelectorCache) *selectorPolicy {
return &selectorPolicy{
Revision: 0,
SelectorCache: selectorCache,
L4Policy: NewL4Policy(0),
}
}
// insertUser adds a user to the L4Policy so that incremental
// updates of the L4Policy may be forwarded.
func (p *selectorPolicy) insertUser(user *EndpointPolicy) {
p.L4Policy.insertUser(user)
}
// removeUser removes a user from the L4Policy so the EndpointPolicy
// can be freed when not needed any more
func (p *selectorPolicy) removeUser(user *EndpointPolicy) {
p.L4Policy.removeUser(user)
}
// Detach releases resources held by a selectorPolicy to enable
// successful eventual GC. Note that the selectorPolicy itself is not
// modified in any way, so that it can be used concurrently.
func (p *selectorPolicy) Detach() {
p.L4Policy.Detach(p.SelectorCache)
}
// DistillPolicy filters down the specified selectorPolicy (which acts
// upon selectors) into a set of concrete map entries based on the
// SelectorCache. These can subsequently be plumbed into the datapath.
//
// Called without holding the Selector cache or Repository locks.
// PolicyOwner (aka Endpoint) is also unlocked during this call,
// but the Endpoint's build mutex is held.
func (p *selectorPolicy) DistillPolicy(policyOwner PolicyOwner, redirects map[string]uint16) *EndpointPolicy {
var calculatedPolicy *EndpointPolicy
// EndpointPolicy is initialized while 'GetVersionHandleFunc' keeps the selector
// cache write locked. This synchronizes the SelectorCache handle creation and the insertion
// of the new policy into the selectorPolicy before any new incremental updates can be
// generated.
//
// With this we have the following guarantees:
// - Selections seen with the 'version' are the ones available at the time of the 'version'
// creation, and the IDs therein have been applied to all Selectors cached at the time.
// - All further incremental updates are delivered to 'policyMapChanges' as whole
// transactions, i.e., changes to all selectors due to addition or deletion of new/old
// identities are visible in the set of changes processed and returned by
// ConsumeMapChanges().
p.SelectorCache.GetVersionHandleFunc(func(version *versioned.VersionHandle) {
calculatedPolicy = &EndpointPolicy{
selectorPolicy: p,
VersionHandle: version,
policyMapState: newMapState(policyOwner.MapStateSize()),
policyMapChanges: MapChanges{
firstVersion: version.Version(),
},
PolicyOwner: policyOwner,
Redirects: redirects,
}
// Register the new EndpointPolicy as a receiver of incremental
// updates before the selector cache lock is released by 'GetVersionHandleFunc'.
p.insertUser(calculatedPolicy)
})
if !p.IngressPolicyEnabled || !p.EgressPolicyEnabled {
calculatedPolicy.policyMapState.allowAllIdentities(
!p.IngressPolicyEnabled, !p.EgressPolicyEnabled)
}
// Must come after the 'insertUser()' above to guarantee
// PolicyMapChanges will contain all changes that are applied
// after the computation of PolicyMapState has started.
calculatedPolicy.toMapState()
if !policyOwner.IsHost() {
calculatedPolicy.policyMapState.determineAllowLocalhostIngress()
}
return calculatedPolicy
}
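// distillAndRealize is a hypothetical sketch (not part of Cilium) of the lifecycle
// described above: resolve the per-identity map state with DistillPolicy, hand it to
// the caller's plumbing step, and then release the selector cache version handle with
// Ready() once the policy has been realized. 'plumb' is an assumed callback standing in
// for the datapath plumbing done by the endpoint.
func distillAndRealize(sp SelectorPolicy, owner PolicyOwner, redirects map[string]uint16, plumb func(*EndpointPolicy)) error {
	// Distill the selector-based policy into concrete map entries.
	ep := sp.DistillPolicy(owner, redirects)
	// Let the caller plumb the computed map state into the datapath.
	plumb(ep)
	// Release the version handle so stale selector cache state can be freed.
	return ep.Ready()
}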
// Ready releases the handle on a selector cache version so that stale state can be released.
// This should be called when the policy has been realized.
func (p *EndpointPolicy) Ready() (err error) {
// release resources held for this version
err = p.VersionHandle.Close()
p.VersionHandle = nil
return err
}
// Detach removes EndpointPolicy references from selectorPolicy
// to allow the EndpointPolicy to be GC'd.
// PolicyOwner (aka Endpoint) is also locked during this call.
func (p *EndpointPolicy) Detach() {
p.selectorPolicy.removeUser(p)
// in case the Ready() call was missed previously
if p.Ready() == nil {
// succeeded, so it was missed previously
_, file, line, _ := runtime.Caller(1)
log.Warningf("Detach: EndpointPolicy was not marked as Ready (%s:%d)", file, line)
}
// Also release the version handle held for incremental updates, if any.
// This must be done after the removeUser() call above, so that we do not get new version
// handles any more.
p.policyMapChanges.detach()
}
func (p *EndpointPolicy) Len() int {
return p.policyMapState.Len()
}
func (p *EndpointPolicy) Get(key Key) (MapStateEntry, bool) {
return p.policyMapState.Get(key)
}
var errMissingKey = errors.New("Key not found")
// GetRuleLabels returns the list of labels of the rules that contributed
// to the entry at this key.
// The returned string is the string representation of a LabelArrayList.
func (p *EndpointPolicy) GetRuleLabels(k Key) (string, error) {
entry, ok := p.policyMapState.get(k)
if !ok {
return "", errMissingKey
}
return entry.derivedFromRules.Value(), nil
}
func (p *EndpointPolicy) Entries() iter.Seq2[Key, MapStateEntry] {
return func(yield func(Key, MapStateEntry) bool) {
p.policyMapState.ForEach(yield)
}
}
func (p *EndpointPolicy) Equals(other MapStateMap) bool {
return p.policyMapState.Equals(other)
}
func (p *EndpointPolicy) Diff(expected MapStateMap) string {
return p.policyMapState.Diff(expected)
}
func (p *EndpointPolicy) Empty() bool {
return p.policyMapState.Empty()
}
// Updated returns an iterator for all key/entry pairs in 'p' that are either new or updated
// compared to the entries in 'realized'.
// Here 'realized' is another EndpointPolicy.
// This can be used to figure out which entries need to be added to or updated in 'realized'.
func (p *EndpointPolicy) Updated(realized *EndpointPolicy) iter.Seq2[Key, MapStateEntry] {
return func(yield func(Key, MapStateEntry) bool) {
p.policyMapState.ForEach(func(key Key, entry MapStateEntry) bool {
if oldEntry, ok := realized.policyMapState.Get(key); !ok || oldEntry != entry {
if !yield(key, entry) {
return false
}
}
return true
})
}
}
// Missing returns an iterator for all key/entry pairs in 'realized' that are missing from 'p'.
// Here 'realized' is another EndpointPolicy.
// This can be used to figure out which entries in 'realized' need to be deleted.
func (p *EndpointPolicy) Missing(realized *EndpointPolicy) iter.Seq2[Key, MapStateEntry] {
return func(yield func(Key, MapStateEntry) bool) {
realized.policyMapState.ForEach(func(key Key, entry MapStateEntry) bool {
// If key that is in realized state is not in desired state, just remove it.
if _, ok := p.policyMapState.Get(key); !ok {
if !yield(key, entry) {
return false
}
}
return true
})
}
}
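// syncRealizedPolicy is a hypothetical sketch (not part of Cilium) that uses the
// Updated and Missing iterators above to bring a realized EndpointPolicy in line with
// the desired one. The 'upsert' and 'del' callbacks are assumptions standing in for
// whatever mechanism the caller uses to mutate the datapath policy map.
func syncRealizedPolicy(desired, realized *EndpointPolicy, upsert func(Key, MapStateEntry), del func(Key)) {
	// Add or update entries that are new or changed in the desired policy.
	for k, e := range desired.Updated(realized) {
		upsert(k, e)
	}
	// Remove entries that are realized but no longer desired.
	for k := range desired.Missing(realized) {
		del(k)
	}
}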
// UpdatedMap returns an iterator for all key/entry pairs in 'p' that are either new or updated
// compared to the entries in 'realized'.
// Here 'realized' is a MapStateMap.
// This can be used to figure out which entries need to be added to or updated in 'realized'.
func (p *EndpointPolicy) UpdatedMap(realized MapStateMap) iter.Seq2[Key, MapStateEntry] {
return func(yield func(Key, MapStateEntry) bool) {
p.policyMapState.ForEach(func(key Key, entry MapStateEntry) bool {
if oldEntry, ok := realized[key]; !ok || oldEntry != entry {
if !yield(key, entry) {
return false
}
}
return true
})
}
}
// MissingMap returns an iterator for all key/entry pairs in 'realized' that are missing from 'p'.
// Here 'realized' is a MapStateMap.
// This can be used to figure out which entries in 'realized' need to be deleted.
func (p *EndpointPolicy) MissingMap(realized MapStateMap) iter.Seq2[Key, MapStateEntry] {
return func(yield func(Key, MapStateEntry) bool) {
for k, v := range realized {
// If key that is in realized state is not in desired state, just remove it.
if _, ok := p.policyMapState.Get(k); !ok {
if !yield(k, v) {
break
}
}
}
}
}
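// diffAgainstRealizedMap is a hypothetical sketch (not part of Cilium) using UpdatedMap
// and MissingMap above to compute which keys need to be upserted and which deleted,
// given the currently realized MapStateMap.
func diffAgainstRealizedMap(p *EndpointPolicy, realized MapStateMap) (adds MapStateMap, deletes Keys) {
	adds = make(MapStateMap)
	deletes = make(Keys)
	// New or changed entries must be written to the datapath map.
	for k, e := range p.UpdatedMap(realized) {
		adds[k] = e
	}
	// Realized entries that are no longer desired must be removed.
	for k := range p.MissingMap(realized) {
		deletes[k] = struct{}{}
	}
	return adds, deletes
}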
func (p *EndpointPolicy) RevertChanges(changes ChangeState) {
// SelectorCache is used as the Identities interface, which only has GetPrefix() and needs no lock
p.policyMapState.revertChanges(changes)
}
// toMapState transforms the EndpointPolicy.L4Policy into
// the datapath-friendly format inside EndpointPolicy.PolicyMapState.
// Called with selectorcache locked for reading.
// Called without holding the Repository lock.
// PolicyOwner (aka Endpoint) is also unlocked during this call,
// but the Endpoint's build mutex is held.
func (p *EndpointPolicy) toMapState() {
p.L4Policy.Ingress.toMapState(p)
p.L4Policy.Egress.toMapState(p)
}
// toMapState transforms the L4DirectionPolicy into
// the datapath-friendly format inside EndpointPolicy.PolicyMapState.
// Called with selectorcache locked for reading.
// Called without holding the Repository lock.
// PolicyOwner (aka Endpoint) is also unlocked during this call,
// but the Endpoint's build mutex is held.
func (l4policy L4DirectionPolicy) toMapState(p *EndpointPolicy) {
l4policy.PortRules.ForEach(func(l4 *L4Filter) bool {
l4.toMapState(p, l4policy.features, ChangeState{})
return true
})
}
// RedirectFilters returns an iterator for each L4Filter with a redirect in the policy.
func (p *selectorPolicy) RedirectFilters() iter.Seq2[*L4Filter, *PerSelectorPolicy] {
return func(yield func(*L4Filter, *PerSelectorPolicy) bool) {
if p.L4Policy.Ingress.forEachRedirectFilter(yield) {
p.L4Policy.Egress.forEachRedirectFilter(yield)
}
}
}
func (l4policy L4DirectionPolicy) forEachRedirectFilter(yield func(*L4Filter, *PerSelectorPolicy) bool) bool {
ok := true
l4policy.PortRules.ForEach(func(l4 *L4Filter) bool {
if l4.IsRedirect() {
for _, ps := range l4.PerSelectorPolicies {
if ps != nil && ps.IsRedirect() {
ok = yield(l4, ps)
}
}
}
return ok
})
return ok
}
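// collectRedirectFilters is a hypothetical sketch (not part of Cilium) using the
// RedirectFilters iterator above: it gathers every L4Filter that has at least one
// per-selector policy requiring a proxy redirect.
func collectRedirectFilters(sp SelectorPolicy) []*L4Filter {
	var filters []*L4Filter
	// Ranging with a single variable over the iter.Seq2 yields only the L4Filter.
	for l4 := range sp.RedirectFilters() {
		filters = append(filters, l4)
	}
	return filters
}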
// ConsumeMapChanges applies accumulated MapChanges to EndpointPolicy 'p' and returns a summary of changes.
// Caller is responsible for calling the returned 'closer' to release resources held for the new version!
// 'closer' may not be called while selector cache is locked!
func (p *EndpointPolicy) ConsumeMapChanges() (closer func(), changes ChangeState) {
features := p.selectorPolicy.L4Policy.Ingress.features | p.selectorPolicy.L4Policy.Egress.features
version, changes := p.policyMapChanges.consumeMapChanges(p, features)
closer = func() {}
if version.IsValid() {
var msg string
// update the version handle in p.VersionHandle so that any follow-on processing
// acts on the basis of the new version
if p.VersionHandle.IsValid() {
p.VersionHandle.Close()
msg = "ConsumeMapChanges: updated valid version"
} else {
closer = func() {
// the old p.VersionHandle was not valid; release the new version via Ready()
p.Ready()
}
msg = "ConsumeMapChanges: new incremental version"
}
p.VersionHandle = version
p.PolicyOwner.PolicyDebug(logrus.Fields{
logfields.Version: version,
logfields.Changes: changes,
}, msg)
}
return closer, changes
}
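// consumeAndApply is a hypothetical sketch (not part of Cilium) of the caller contract
// documented above: consume the accumulated changes, apply them via the assumed 'apply'
// callback, and only then call the returned 'closer' outside the selector cache lock to
// release resources held for the new version.
func consumeAndApply(p *EndpointPolicy, apply func(ChangeState)) {
	closer, changes := p.ConsumeMapChanges()
	// Apply the incremental adds/deletes before releasing the version.
	apply(changes)
	closer()
}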
// NewEndpointPolicy returns an empty EndpointPolicy stub.
func NewEndpointPolicy(repo PolicyRepository) *EndpointPolicy {
return &EndpointPolicy{
selectorPolicy: newSelectorPolicy(repo.GetSelectorCache()),
policyMapState: emptyMapState(),
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"fmt"
"net/netip"
"sync"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
k8stypes "k8s.io/apimachinery/pkg/types"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
)
func GenerateL3IngressDenyRules(numRules int) (api.Rules, identity.IdentityMap) {
parseFooLabel := labels.ParseSelectLabel("k8s:foo")
fooSelector := api.NewESFromLabels(parseFooLabel)
barSelector := api.NewESFromLabels(labels.ParseSelectLabel("bar"))
// Change ingRule and rule in the for-loop below to change what type of rules
// are added into the policy repository.
ingDenyRule := api.IngressDenyRule{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{barSelector},
},
}
rules := make(api.Rules, 0, numRules)
for i := 1; i <= numRules; i++ {
rule := api.Rule{
EndpointSelector: fooSelector,
IngressDeny: []api.IngressDenyRule{ingDenyRule},
}
rule.Sanitize()
rules = append(rules, &rule)
}
return rules, generateNumIdentities(3000)
}
// generate a CIDR identity for each unique CIDR rule in 'rules'
func generateCIDRIdentities(rules api.Rules) identity.IdentityMap {
c := make(identity.IdentityMap, len(rules))
prefixes := make(map[string]identity.NumericIdentity)
id := identity.IdentityScopeLocal
addPrefix := func(prefix string) {
if _, exists := prefixes[prefix]; !exists {
lbls := labels.GetCIDRLabels(netip.MustParsePrefix(prefix))
id++
c[id] = lbls.LabelArray()
prefixes[prefix] = id
}
}
for _, rule := range rules {
for _, egress := range rule.Egress {
for _, toCIDR := range egress.ToCIDR {
addPrefix(string(toCIDR))
}
}
for _, egress := range rule.EgressDeny {
for _, toCIDR := range egress.ToCIDR {
addPrefix(string(toCIDR))
}
}
for _, ingress := range rule.Ingress {
for _, fromCIDR := range ingress.FromCIDR {
addPrefix(string(fromCIDR))
}
}
for _, ingress := range rule.IngressDeny {
for _, fromCIDR := range ingress.FromCIDR {
addPrefix(string(fromCIDR))
}
}
}
return c
}
func generateCIDREgressRule(i int) api.EgressRule {
port := fmt.Sprintf("%d", 80+i%97)
prefix := []string{"8", "16", "24", "28", "32"}[i%5]
var net string
switch prefix {
case "8":
net = []string{"10.0.0.0", "192.0.0.0", "244.0.0.0"}[i%3]
case "16":
pat := []string{"10.%d.0.0", "192.%d.0.0", "244.%d.0.0"}[i%3]
net = fmt.Sprintf(pat, i%17)
case "24":
pat := []string{"10.%d.%d.0", "192.%d.%d.0", "244.%d.%d.0"}[i%3]
net = fmt.Sprintf(pat, i%17, i%121)
case "28":
pat := []string{"10.%d.%d.%d", "192.%d.%d.%d", "244.%d.%d.%d"}[i%3]
net = fmt.Sprintf(pat, i%17, i%121, i%16<<4)
case "32":
pat := []string{"10.%d.%d.%d", "192.%d.%d.%d", "244.%d.%d.%d"}[i%3]
net = fmt.Sprintf(pat, i%17, i%121, i%255)
}
cidr := net + "/" + prefix
return api.EgressRule{
EgressCommonRule: api.EgressCommonRule{
ToCIDR: []api.CIDR{api.CIDR(cidr)},
},
ToPorts: []api.PortRule{
{
Ports: []api.PortProtocol{
{
Port: port,
Protocol: api.ProtoTCP,
},
},
},
},
}
}
func generateCIDREgressDenyRule(i int) api.EgressDenyRule {
port := fmt.Sprintf("%d", 80+i%131)
prefix := []string{"8", "16", "24", "28", "32"}[(i+21)%5]
var net string
switch prefix {
case "8":
net = []string{"10.0.0.0", "192.0.0.0", "244.0.0.0"}[i%3]
case "16":
pat := []string{"10.%d.0.0", "192.%d.0.0", "244.%d.0.0"}[i%3]
net = fmt.Sprintf(pat, i%23)
case "24":
pat := []string{"10.%d.%d.0", "192.%d.%d.0", "244.%d.%d.0"}[i%3]
net = fmt.Sprintf(pat, i%23, i%119)
case "28":
pat := []string{"10.%d.%d.%d", "192.%d.%d.%d", "244.%d.%d.%d"}[i%3]
net = fmt.Sprintf(pat, i%23, i%119, i%15<<4)
case "32":
pat := []string{"10.%d.%d.%d", "192.%d.%d.%d", "244.%d.%d.%d"}[i%3]
net = fmt.Sprintf(pat, i%23, i%119, i%253)
}
cidr := net + "/" + prefix
return api.EgressDenyRule{
EgressCommonRule: api.EgressCommonRule{
ToCIDR: []api.CIDR{api.CIDR(cidr)},
},
ToPorts: []api.PortDenyRule{
{
Ports: []api.PortProtocol{
{
Port: port,
Protocol: api.ProtoTCP,
},
},
},
},
}
}
func GenerateCIDRDenyRules(numRules int) (api.Rules, identity.IdentityMap) {
parseFooLabel := labels.ParseSelectLabel("k8s:foo")
fooSelector := api.NewESFromLabels(parseFooLabel)
var rules api.Rules
for i := 1; i <= numRules; i++ {
uuid := k8stypes.UID(fmt.Sprintf("12bba160-ddca-13e8-%04x-0800273b04ff", i))
rule := api.Rule{
EndpointSelector: fooSelector,
Egress: []api.EgressRule{generateCIDREgressRule(i)},
EgressDeny: []api.EgressDenyRule{generateCIDREgressDenyRule(i + 773)},
Labels: utils.GetPolicyLabels("default", fmt.Sprintf("cidr-%d", i), uuid, utils.ResourceTypeCiliumNetworkPolicy),
}
rule.Sanitize()
rules = append(rules, &rule)
}
return rules, generateCIDRIdentities(rules)
}
func BenchmarkRegenerateCIDRDenyPolicyRules(b *testing.B) {
td := newTestData()
td.bootstrapRepo(GenerateCIDRDenyRules, 1000, b)
ip, _ := td.repo.resolvePolicyLocked(fooIdentity)
owner := DummyOwner{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
epPolicy := ip.DistillPolicy(owner, nil)
owner.mapStateSize = epPolicy.policyMapState.Len()
epPolicy.Ready()
}
ip.Detach()
b.Logf("Number of MapState entries: %d\n", owner.mapStateSize)
}
func TestRegenerateCIDRDenyPolicyRules(t *testing.T) {
td := newTestData()
td.bootstrapRepo(GenerateCIDRDenyRules, 10, t)
ip, _ := td.repo.resolvePolicyLocked(fooIdentity)
epPolicy := ip.DistillPolicy(DummyOwner{}, nil)
n := epPolicy.policyMapState.Len()
epPolicy.Ready()
ip.Detach()
assert.Positive(t, n)
}
func TestL3WithIngressDenyWildcard(t *testing.T) {
td := newTestData()
repo := td.repo
td.bootstrapRepo(GenerateL3IngressDenyRules, 1000, t)
idFooSelectLabelArray := labels.ParseSelectLabelArray("id=foo")
idFooSelectLabels := labels.Labels{}
for _, lbl := range idFooSelectLabelArray {
idFooSelectLabels[lbl.Key] = lbl
}
fooIdentity := identity.NewIdentity(12345, idFooSelectLabels)
td.addIdentity(fooIdentity)
selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
rule1 := api.Rule{
EndpointSelector: selFoo,
IngressDeny: []api.IngressDenyRule{
{
ToPorts: []api.PortDenyRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}
rule1.Sanitize()
_, _, err := repo.mustAdd(rule1)
require.NoError(t, err)
repo.mutex.RLock()
defer repo.mutex.RUnlock()
selPolicy, err := repo.resolvePolicyLocked(fooIdentity)
require.NoError(t, err)
policy := selPolicy.DistillPolicy(DummyOwner{}, nil)
policy.Ready()
expectedEndpointPolicy := EndpointPolicy{
selectorPolicy: &selectorPolicy{
Revision: repo.GetRevision(),
SelectorCache: repo.GetSelectorCache(),
L4Policy: L4Policy{
Revision: repo.GetRevision(),
Ingress: L4DirectionPolicy{PortRules: NewL4PolicyMapWithValues(map[string]*L4Filter{
"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeNone,
Ingress: true,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{IsDeny: true},
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
},
}),
features: denyRules,
},
Egress: newL4DirectionPolicy(),
},
IngressPolicyEnabled: true,
},
PolicyOwner: DummyOwner{},
// inherit this from the result as it is outside of the scope
// of this test
policyMapState: policy.policyMapState,
}
// Have to remove circular reference before testing to avoid an infinite loop
policy.selectorPolicy.Detach()
// Assign an empty mutex so that checker.Equal does not complain about the
// difference of the internal time.Time from the lock_debug.go.
policy.selectorPolicy.L4Policy.mutex = lock.RWMutex{}
policy.policyMapChanges.mutex = lock.Mutex{}
policy.policyMapChanges.firstVersion = 0
// policyMapState cannot be compared via DeepEqual
require.Truef(t, policy.policyMapState.Equal(&expectedEndpointPolicy.policyMapState), policy.policyMapState.diff(&expectedEndpointPolicy.policyMapState))
policy.policyMapState = mapState{}
expectedEndpointPolicy.policyMapState = mapState{}
require.Equal(t, &expectedEndpointPolicy, policy)
}
func TestL3WithLocalHostWildcard(t *testing.T) {
td := newTestData()
repo := td.repo
td.bootstrapRepo(GenerateL3IngressDenyRules, 1000, t)
idFooSelectLabelArray := labels.ParseSelectLabelArray("id=foo")
idFooSelectLabels := labels.Labels{}
for _, lbl := range idFooSelectLabelArray {
idFooSelectLabels[lbl.Key] = lbl
}
fooIdentity := identity.NewIdentity(12345, idFooSelectLabels)
td.addIdentity(fooIdentity)
// Emulate Kubernetes mode with allow from localhost
oldLocalhostOpt := option.Config.AllowLocalhost
option.Config.AllowLocalhost = option.AllowLocalhostAlways
defer func() { option.Config.AllowLocalhost = oldLocalhostOpt }()
selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
rule1 := api.Rule{
EndpointSelector: selFoo,
IngressDeny: []api.IngressDenyRule{
{
ToPorts: []api.PortDenyRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}
rule1.Sanitize()
_, _, err := repo.mustAdd(rule1)
require.NoError(t, err)
repo.mutex.RLock()
defer repo.mutex.RUnlock()
selPolicy, err := repo.resolvePolicyLocked(fooIdentity)
require.NoError(t, err)
policy := selPolicy.DistillPolicy(DummyOwner{}, nil)
policy.Ready()
cachedSelectorHost := td.sc.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameHost])
require.NotNil(t, cachedSelectorHost)
expectedEndpointPolicy := EndpointPolicy{
selectorPolicy: &selectorPolicy{
Revision: repo.GetRevision(),
SelectorCache: repo.GetSelectorCache(),
L4Policy: L4Policy{
Revision: repo.GetRevision(),
Ingress: L4DirectionPolicy{PortRules: NewL4PolicyMapWithValues(map[string]*L4Filter{
"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeNone,
Ingress: true,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{IsDeny: true},
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
},
}),
features: denyRules,
},
Egress: newL4DirectionPolicy(),
},
IngressPolicyEnabled: true,
},
PolicyOwner: DummyOwner{},
// inherit this from the result as it is outside of the scope
// of this test
policyMapState: policy.policyMapState,
}
// Have to remove circular reference before testing to avoid an infinite loop
policy.selectorPolicy.Detach()
// Assign an empty mutex so that checker.Equal does not complain about the
// difference of the internal time.Time from the lock_debug.go.
policy.selectorPolicy.L4Policy.mutex = lock.RWMutex{}
policy.policyMapChanges.mutex = lock.Mutex{}
policy.policyMapChanges.firstVersion = 0
// policyMapState cannot be compared via DeepEqual
require.Truef(t, policy.policyMapState.Equal(&expectedEndpointPolicy.policyMapState), policy.policyMapState.diff(&expectedEndpointPolicy.policyMapState))
policy.policyMapState = mapState{}
expectedEndpointPolicy.policyMapState = mapState{}
require.Equal(t, &expectedEndpointPolicy, policy)
}
func TestMapStateWithIngressDenyWildcard(t *testing.T) {
td := newTestData()
repo := td.repo
td.bootstrapRepo(GenerateL3IngressDenyRules, 1000, t)
ruleLabel := labels.ParseLabelArray("rule-foo-allow-port-80")
ruleLabelAllowAnyEgress := labels.LabelArray{
labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyEgress, labels.LabelSourceReserved),
}
idFooSelectLabelArray := labels.ParseSelectLabelArray("id=foo")
idFooSelectLabels := labels.Labels{}
for _, lbl := range idFooSelectLabelArray {
idFooSelectLabels[lbl.Key] = lbl
}
fooIdentity := identity.NewIdentity(12345, idFooSelectLabels)
td.addIdentity(fooIdentity)
selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
rule1 := api.Rule{
EndpointSelector: selFoo,
Labels: ruleLabel,
IngressDeny: []api.IngressDenyRule{
{
ToPorts: []api.PortDenyRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}
rule1.Sanitize()
_, _, err := repo.mustAdd(rule1)
require.NoError(t, err)
repo.mutex.RLock()
defer repo.mutex.RUnlock()
selPolicy, err := repo.resolvePolicyLocked(fooIdentity)
require.NoError(t, err)
policy := selPolicy.DistillPolicy(DummyOwner{}, nil)
policy.Ready()
rule1MapStateEntry := denyEntry().withLabels(labels.LabelArrayList{ruleLabel})
allowEgressMapStateEntry := newAllowEntryWithLabels(ruleLabelAllowAnyEgress)
expectedEndpointPolicy := EndpointPolicy{
selectorPolicy: &selectorPolicy{
Revision: repo.GetRevision(),
SelectorCache: repo.GetSelectorCache(),
L4Policy: L4Policy{
Revision: repo.GetRevision(),
Ingress: L4DirectionPolicy{PortRules: NewL4PolicyMapWithValues(map[string]*L4Filter{
"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeNone,
Ingress: true,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{IsDeny: true},
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {ruleLabel}}),
},
}),
features: denyRules,
},
Egress: newL4DirectionPolicy(),
},
IngressPolicyEnabled: true,
},
PolicyOwner: DummyOwner{},
policyMapState: emptyMapState().withState(mapStateMap{
// Although we have calculated deny policies, the overall policy
// will still allow egress to world.
EgressKey(): allowEgressMapStateEntry,
IngressKey().WithTCPPort(80): rule1MapStateEntry,
}),
}
// Add new identity to test accumulation of MapChanges
added1 := identity.IdentityMap{
identity.NumericIdentity(192): labels.ParseSelectLabelArray("id=resolve_test_1"),
}
wg := &sync.WaitGroup{}
td.sc.UpdateIdentities(added1, nil, wg)
// Cleanup the identities from the testSelectorCache
defer td.sc.UpdateIdentities(nil, added1, wg)
wg.Wait()
require.Empty(t, policy.policyMapChanges.synced)
// Have to remove circular reference before testing to avoid an infinite loop
policy.selectorPolicy.Detach()
// Assign an empty mutex so that checker.Equal does not complain about the
// difference of the internal time.Time from the lock_debug.go.
policy.selectorPolicy.L4Policy.mutex = lock.RWMutex{}
policy.policyMapChanges.mutex = lock.Mutex{}
policy.policyMapChanges.firstVersion = 0
// policyMapState cannot be compared via DeepEqual
require.Truef(t, policy.policyMapState.Equal(&expectedEndpointPolicy.policyMapState), policy.policyMapState.diff(&expectedEndpointPolicy.policyMapState))
policy.policyMapState = mapState{}
expectedEndpointPolicy.policyMapState = mapState{}
require.Equal(t, &expectedEndpointPolicy, policy)
}
func TestMapStateWithIngressDeny(t *testing.T) {
td := newTestData()
repo := td.repo
td.bootstrapRepo(GenerateL3IngressDenyRules, 1000, t)
ruleLabel := labels.ParseLabelArray("rule-deny-port-80-world-and-test")
ruleLabelAllowAnyEgress := labels.LabelArray{
labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyEgress, labels.LabelSourceReserved),
}
idFooSelectLabelArray := labels.ParseSelectLabelArray("id=foo")
idFooSelectLabels := labels.Labels{}
for _, lbl := range idFooSelectLabelArray {
idFooSelectLabels[lbl.Key] = lbl
}
fooIdentity := identity.NewIdentity(12345, idFooSelectLabels)
td.addIdentity(fooIdentity)
lblTest := labels.ParseLabel("id=resolve_test_1")
selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
rule1 := api.Rule{
EndpointSelector: selFoo,
Labels: ruleLabel,
IngressDeny: []api.IngressDenyRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEntities: []api.Entity{api.EntityWorld},
},
ToPorts: []api.PortDenyRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{
api.NewESFromLabels(lblTest),
},
},
ToPorts: []api.PortDenyRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}
rule1.Sanitize()
_, _, err := repo.mustAdd(rule1)
require.NoError(t, err)
repo.mutex.RLock()
defer repo.mutex.RUnlock()
selPolicy, err := repo.resolvePolicyLocked(fooIdentity)
require.NoError(t, err)
policy := selPolicy.DistillPolicy(DummyOwner{}, nil)
policy.Ready()
// Add new identity to test accumulation of MapChanges
added1 := identity.IdentityMap{
identity.NumericIdentity(192): labels.ParseSelectLabelArray("id=resolve_test_1", "num=1"),
identity.NumericIdentity(193): labels.ParseSelectLabelArray("id=resolve_test_1", "num=2"),
identity.NumericIdentity(194): labels.ParseSelectLabelArray("id=resolve_test_1", "num=3"),
}
wg := &sync.WaitGroup{}
td.sc.UpdateIdentities(added1, nil, wg)
wg.Wait()
require.Len(t, policy.policyMapChanges.synced, 3)
deleted1 := identity.IdentityMap{
identity.NumericIdentity(193): labels.ParseSelectLabelArray("id=resolve_test_1", "num=2"),
}
wg = &sync.WaitGroup{}
td.sc.UpdateIdentities(nil, deleted1, wg)
wg.Wait()
require.Len(t, policy.policyMapChanges.synced, 4)
cachedSelectorWorld := td.sc.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameWorld])
require.NotNil(t, cachedSelectorWorld)
cachedSelectorWorldV4 := td.sc.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameWorldIPv4])
require.NotNil(t, cachedSelectorWorldV4)
cachedSelectorWorldV6 := td.sc.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameWorldIPv6])
require.NotNil(t, cachedSelectorWorldV6)
cachedSelectorTest := td.sc.FindCachedIdentitySelector(api.NewESFromLabels(lblTest))
require.NotNil(t, cachedSelectorTest)
rule1MapStateEntry := denyEntry().withLabels(labels.LabelArrayList{ruleLabel})
allowEgressMapStateEntry := newAllowEntryWithLabels(ruleLabelAllowAnyEgress)
expectedEndpointPolicy := EndpointPolicy{
selectorPolicy: &selectorPolicy{
Revision: repo.GetRevision(),
SelectorCache: repo.GetSelectorCache(),
L4Policy: L4Policy{
Revision: repo.GetRevision(),
Ingress: L4DirectionPolicy{PortRules: NewL4PolicyMapWithValues(map[string]*L4Filter{
"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
L7Parser: ParserTypeNone,
Ingress: true,
PerSelectorPolicies: L7DataMap{
cachedSelectorWorld: &PerSelectorPolicy{IsDeny: true},
cachedSelectorWorldV4: &PerSelectorPolicy{IsDeny: true},
cachedSelectorWorldV6: &PerSelectorPolicy{IsDeny: true},
cachedSelectorTest: &PerSelectorPolicy{IsDeny: true},
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
cachedSelectorWorld: {ruleLabel},
cachedSelectorWorldV4: {ruleLabel},
cachedSelectorWorldV6: {ruleLabel},
cachedSelectorTest: {ruleLabel},
}),
},
}),
features: denyRules,
},
Egress: newL4DirectionPolicy(),
},
IngressPolicyEnabled: true,
},
PolicyOwner: DummyOwner{},
policyMapState: emptyMapState().withState(mapStateMap{
// Although we have calculated deny policies, the overall policy
// will still allow egress to world.
EgressKey(): allowEgressMapStateEntry,
IngressKey().WithIdentity(identity.ReservedIdentityWorld).WithTCPPort(80): rule1MapStateEntry,
IngressKey().WithIdentity(identity.ReservedIdentityWorldIPv4).WithTCPPort(80): rule1MapStateEntry,
IngressKey().WithIdentity(identity.ReservedIdentityWorldIPv6).WithTCPPort(80): rule1MapStateEntry,
IngressKey().WithIdentity(192).WithTCPPort(80): rule1MapStateEntry,
IngressKey().WithIdentity(194).WithTCPPort(80): rule1MapStateEntry,
}),
}
closer, changes := policy.ConsumeMapChanges()
closer()
// maps on the policy got cleared
require.Equal(t, Keys{
ingressKey(192, 6, 80, 0): {},
ingressKey(194, 6, 80, 0): {},
}, changes.Adds)
require.Equal(t, Keys{
ingressKey(193, 6, 80, 0): {},
}, changes.Deletes)
// Have to remove circular reference before testing for Equality to avoid an infinite loop
policy.selectorPolicy.Detach()
// Verify that cached selector is not found after Detach().
// Note that this depends on the other tests NOT using the same selector concurrently!
cachedSelectorTest = td.sc.FindCachedIdentitySelector(api.NewESFromLabels(lblTest))
require.Nil(t, cachedSelectorTest)
// Assign an empty mutex so that checker.Equal does not complain about the
// difference of the internal time.Time from the lock_debug.go.
policy.selectorPolicy.L4Policy.mutex = lock.RWMutex{}
policy.policyMapChanges.mutex = lock.Mutex{}
policy.policyMapChanges.firstVersion = 0
// policyMapState cannot be compared via DeepEqual
require.Truef(t, policy.policyMapState.Equal(&expectedEndpointPolicy.policyMapState), policy.policyMapState.diff(&expectedEndpointPolicy.policyMapState))
policy.policyMapState = mapState{}
expectedEndpointPolicy.policyMapState = mapState{}
require.Equal(t, &expectedEndpointPolicy, policy)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"fmt"
"sync"
"testing"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
k8stypes "k8s.io/apimachinery/pkg/types"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/u8proto"
)
var (
fooLabel = labels.NewLabel("k8s:foo", "", "")
lbls = labels.Labels{
"foo": fooLabel,
}
fooIdentity = &identity.Identity{
ID: 303,
Labels: lbls,
LabelArray: lbls.LabelArray(),
}
)
var testRedirects = map[string]uint16{
"1234:ingress:TCP:80:": 1,
}
func generateNumIdentities(numIdentities int) identity.IdentityMap {
c := make(identity.IdentityMap, numIdentities)
for i := 0; i < numIdentities; i++ {
identityLabel := labels.NewLabel(fmt.Sprintf("k8s:foo%d", i), "", "")
clusterLabel := labels.NewLabel("io.cilium.k8s.policy.cluster=default", "", labels.LabelSourceK8s)
serviceAccountLabel := labels.NewLabel("io.cilium.k8s.policy.serviceaccount=default", "", labels.LabelSourceK8s)
namespaceLabel := labels.NewLabel("io.kubernetes.pod.namespace=monitoring", "", labels.LabelSourceK8s)
funLabel := labels.NewLabel("app=analytics-erneh", "", labels.LabelSourceK8s)
identityLabels := labels.Labels{
fmt.Sprintf("foo%d", i): identityLabel,
"k8s:io.cilium.k8s.policy.cluster=default": clusterLabel,
"k8s:io.cilium.k8s.policy.serviceaccount=default": serviceAccountLabel,
"k8s:io.kubernetes.pod.namespace=monitoring": namespaceLabel,
"k8s:app=analytics-erneh": funLabel,
}
bumpedIdentity := i + 1000
numericIdentity := identity.NumericIdentity(bumpedIdentity)
c[numericIdentity] = identityLabels.LabelArray()
}
return c
}
func GenerateL3IngressRules(numRules int) (api.Rules, identity.IdentityMap) {
parseFooLabel := labels.ParseSelectLabel("k8s:foo")
fooSelector := api.NewESFromLabels(parseFooLabel)
barSelector := api.NewESFromLabels(labels.ParseSelectLabel("bar"))
// Change ingRule and rule in the for-loop below to change what type of rules
// are added into the policy repository.
ingRule := api.IngressRule{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{barSelector},
},
}
var rules api.Rules
uuid := k8stypes.UID("11bba160-ddca-13e8-b697-0800273b04ff")
for i := 1; i <= numRules; i++ {
rule := api.Rule{
EndpointSelector: fooSelector,
Ingress: []api.IngressRule{ingRule},
Labels: utils.GetPolicyLabels("default", "l3-ingress", uuid, utils.ResourceTypeCiliumNetworkPolicy),
}
rule.Sanitize()
rules = append(rules, &rule)
}
return rules, generateNumIdentities(3000)
}
func GenerateL3EgressRules(numRules int) (api.Rules, identity.IdentityMap) {
parseFooLabel := labels.ParseSelectLabel("k8s:foo")
fooSelector := api.NewESFromLabels(parseFooLabel)
barSelector := api.NewESFromLabels(labels.ParseSelectLabel("bar"))
// Change ingRule and rule in the for-loop below to change what type of rules
// are added into the policy repository.
egRule := api.EgressRule{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{barSelector},
},
}
var rules api.Rules
uuid := k8stypes.UID("13bba160-ddca-13e8-b697-0800273b04ff")
for i := 1; i <= numRules; i++ {
rule := api.Rule{
EndpointSelector: fooSelector,
Egress: []api.EgressRule{egRule},
Labels: utils.GetPolicyLabels("default", "l3-egress", uuid, utils.ResourceTypeCiliumNetworkPolicy),
}
rule.Sanitize()
rules = append(rules, &rule)
}
return rules, generateNumIdentities(3000)
}
func GenerateCIDRRules(numRules int) (api.Rules, identity.IdentityMap) {
parseFooLabel := labels.ParseSelectLabel("k8s:foo")
fooSelector := api.NewESFromLabels(parseFooLabel)
//barSelector := api.NewESFromLabels(labels.ParseSelectLabel("bar"))
var rules api.Rules
uuid := k8stypes.UID("12bba160-ddca-13e8-b697-0800273b04ff")
for i := 1; i <= numRules; i++ {
rule := api.Rule{
EndpointSelector: fooSelector,
Egress: []api.EgressRule{generateCIDREgressRule(i)},
Labels: utils.GetPolicyLabels("default", "cidr", uuid, utils.ResourceTypeCiliumNetworkPolicy),
}
rule.Sanitize()
rules = append(rules, &rule)
}
return rules, generateCIDRIdentities(rules)
}
type DummyOwner struct {
mapStateSize int
}
func (d DummyOwner) CreateRedirects(*L4Filter) {
}
func (d DummyOwner) GetNamedPort(ingress bool, name string, proto u8proto.U8proto) uint16 {
return 80
}
func (d DummyOwner) GetNamedPortLocked(ingress bool, name string, proto u8proto.U8proto) uint16 {
return 80
}
func (d DummyOwner) GetID() uint64 {
return 1234
}
func (d DummyOwner) IsHost() bool {
return false
}
func (d DummyOwner) MapStateSize() int {
return d.mapStateSize
}
func (d DummyOwner) PolicyDebug(fields logrus.Fields, msg string) {
log.WithFields(fields).Info(msg)
}
func (td *testData) bootstrapRepo(ruleGenFunc func(int) (api.Rules, identity.IdentityMap), numRules int, tb testing.TB) {
SetPolicyEnabled(option.DefaultEnforcement)
wg := &sync.WaitGroup{}
// load in standard reserved identities
c := identity.IdentityMap{
fooIdentity.ID: fooIdentity.LabelArray,
}
identity.IterateReservedIdentities(func(ni identity.NumericIdentity, id *identity.Identity) {
c[ni] = id.Labels.LabelArray()
})
td.sc.UpdateIdentities(c, nil, wg)
apiRules, ids := ruleGenFunc(numRules)
td.sc.UpdateIdentities(ids, nil, wg)
wg.Wait()
td.repo.MustAddList(apiRules)
}
func BenchmarkRegenerateCIDRPolicyRules(b *testing.B) {
td := newTestData()
td.bootstrapRepo(GenerateCIDRRules, 1000, b)
ip, _ := td.repo.resolvePolicyLocked(fooIdentity)
owner := DummyOwner{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
epPolicy := ip.DistillPolicy(owner, nil)
owner.mapStateSize = epPolicy.policyMapState.Len()
epPolicy.Ready()
}
ip.Detach()
b.Logf("Number of MapState entries: %d\n", owner.mapStateSize)
}
func BenchmarkRegenerateL3IngressPolicyRules(b *testing.B) {
td := newTestData()
td.bootstrapRepo(GenerateL3IngressRules, 1000, b)
b.ResetTimer()
for i := 0; i < b.N; i++ {
ip, _ := td.repo.resolvePolicyLocked(fooIdentity)
policy := ip.DistillPolicy(DummyOwner{}, nil)
policy.Ready()
ip.Detach()
}
}
func BenchmarkRegenerateL3EgressPolicyRules(b *testing.B) {
td := newTestData()
td.bootstrapRepo(GenerateL3EgressRules, 1000, b)
b.ResetTimer()
for i := 0; i < b.N; i++ {
ip, _ := td.repo.resolvePolicyLocked(fooIdentity)
policy := ip.DistillPolicy(DummyOwner{}, nil)
policy.Ready()
ip.Detach()
}
}
func TestL7WithIngressWildcard(t *testing.T) {
td := newTestData()
repo := td.repo
td.bootstrapRepo(GenerateL3IngressRules, 1000, t)
idFooSelectLabelArray := labels.ParseSelectLabelArray("id=foo")
idFooSelectLabels := labels.Labels{}
for _, lbl := range idFooSelectLabelArray {
idFooSelectLabels[lbl.Key] = lbl
}
fooIdentity := identity.NewIdentity(12345, idFooSelectLabels)
td.addIdentity(fooIdentity)
selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
rule1 := api.Rule{
EndpointSelector: selFoo,
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/good"},
},
},
}},
},
},
}
rule1.Sanitize()
_, _, err := repo.mustAdd(rule1)
require.NoError(t, err)
repo.mutex.RLock()
defer repo.mutex.RUnlock()
selPolicy, err := repo.resolvePolicyLocked(fooIdentity)
require.NoError(t, err)
require.Equal(t, redirectTypeEnvoy, selPolicy.L4Policy.redirectTypes)
policy := selPolicy.DistillPolicy(DummyOwner{}, testRedirects)
policy.Ready()
expectedEndpointPolicy := EndpointPolicy{
Redirects: testRedirects,
selectorPolicy: &selectorPolicy{
Revision: repo.GetRevision(),
SelectorCache: repo.GetSelectorCache(),
L4Policy: L4Policy{
Revision: repo.GetRevision(),
Ingress: L4DirectionPolicy{PortRules: NewL4PolicyMapWithValues(map[string]*L4Filter{
"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeHTTP,
Ingress: true,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Method: "GET", Path: "/good"}},
},
CanShortCircuit: true,
isRedirect: true,
},
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
},
}),
features: redirectRules,
},
Egress: newL4DirectionPolicy(),
redirectTypes: redirectTypeEnvoy,
},
IngressPolicyEnabled: true,
EgressPolicyEnabled: false,
},
PolicyOwner: DummyOwner{},
// inherit this from the result as it is outside of the scope
// of this test
policyMapState: policy.policyMapState,
}
// Have to remove circular reference before testing to avoid an infinite loop
policy.selectorPolicy.Detach()
// Assign an empty mutex so that checker.Equal does not complain about the
// difference of the internal time.Time from the lock_debug.go.
policy.selectorPolicy.L4Policy.mutex = lock.RWMutex{}
policy.policyMapChanges.mutex = lock.Mutex{}
policy.policyMapChanges.firstVersion = 0
// policyMapState cannot be compared via DeepEqual
require.Truef(t, policy.policyMapState.Equal(&expectedEndpointPolicy.policyMapState), policy.policyMapState.diff(&expectedEndpointPolicy.policyMapState))
policy.policyMapState = mapState{}
expectedEndpointPolicy.policyMapState = mapState{}
require.Equal(t, &expectedEndpointPolicy, policy)
}
func TestL7WithLocalHostWildcard(t *testing.T) {
td := newTestData()
repo := td.repo
td.bootstrapRepo(GenerateL3IngressRules, 1000, t)
idFooSelectLabelArray := labels.ParseSelectLabelArray("id=foo")
idFooSelectLabels := labels.Labels{}
for _, lbl := range idFooSelectLabelArray {
idFooSelectLabels[lbl.Key] = lbl
}
fooIdentity := identity.NewIdentity(12345, idFooSelectLabels)
td.addIdentity(fooIdentity)
// Emulate Kubernetes mode with allow from localhost
oldLocalhostOpt := option.Config.AllowLocalhost
option.Config.AllowLocalhost = option.AllowLocalhostAlways
defer func() { option.Config.AllowLocalhost = oldLocalhostOpt }()
selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
rule1 := api.Rule{
EndpointSelector: selFoo,
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/good"},
},
},
}},
},
},
}
rule1.Sanitize()
_, _, err := repo.mustAdd(rule1)
require.NoError(t, err)
repo.mutex.RLock()
defer repo.mutex.RUnlock()
selPolicy, err := repo.resolvePolicyLocked(fooIdentity)
require.NoError(t, err)
policy := selPolicy.DistillPolicy(DummyOwner{}, testRedirects)
policy.Ready()
cachedSelectorHost := td.sc.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameHost])
require.NotNil(t, cachedSelectorHost)
expectedEndpointPolicy := EndpointPolicy{
Redirects: testRedirects,
selectorPolicy: &selectorPolicy{
Revision: repo.GetRevision(),
SelectorCache: repo.GetSelectorCache(),
L4Policy: L4Policy{
Revision: repo.GetRevision(),
Ingress: L4DirectionPolicy{PortRules: NewL4PolicyMapWithValues(map[string]*L4Filter{
"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeHTTP,
Ingress: true,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Method: "GET", Path: "/good"}},
},
CanShortCircuit: true,
isRedirect: true,
},
cachedSelectorHost: nil,
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
},
}),
features: redirectRules,
},
Egress: newL4DirectionPolicy(),
redirectTypes: redirectTypeEnvoy,
},
IngressPolicyEnabled: true,
EgressPolicyEnabled: false,
},
PolicyOwner: DummyOwner{},
// inherit this from the result as it is outside of the scope
// of this test
policyMapState: policy.policyMapState,
}
// Have to remove circular reference before testing to avoid an infinite loop
policy.selectorPolicy.Detach()
// Assign an empty mutex so that checker.Equal does not complain about the
// difference of the internal time.Time from the lock_debug.go.
policy.selectorPolicy.L4Policy.mutex = lock.RWMutex{}
policy.policyMapChanges.mutex = lock.Mutex{}
policy.policyMapChanges.firstVersion = 0
// policyMapState cannot be compared via DeepEqual
require.Truef(t, policy.policyMapState.Equal(&expectedEndpointPolicy.policyMapState), policy.policyMapState.diff(&expectedEndpointPolicy.policyMapState))
policy.policyMapState = mapState{}
expectedEndpointPolicy.policyMapState = mapState{}
require.Equal(t, &expectedEndpointPolicy, policy)
}
func TestMapStateWithIngressWildcard(t *testing.T) {
td := newTestData()
repo := td.repo
td.bootstrapRepo(GenerateL3IngressRules, 1000, t)
ruleLabel := labels.ParseLabelArray("rule-foo-allow-port-80")
ruleLabelAllowAnyEgress := labels.LabelArray{
labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyEgress, labels.LabelSourceReserved),
}
idFooSelectLabelArray := labels.ParseSelectLabelArray("id=foo")
idFooSelectLabels := labels.Labels{}
for _, lbl := range idFooSelectLabelArray {
idFooSelectLabels[lbl.Key] = lbl
}
fooIdentity := identity.NewIdentity(12345, idFooSelectLabels)
td.addIdentity(fooIdentity)
selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
rule1 := api.Rule{
EndpointSelector: selFoo,
Labels: ruleLabel,
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{},
}},
},
},
}
rule1.Sanitize()
_, _, err := repo.mustAdd(rule1)
require.NoError(t, err)
repo.mutex.RLock()
defer repo.mutex.RUnlock()
selPolicy, err := repo.resolvePolicyLocked(fooIdentity)
require.NoError(t, err)
policy := selPolicy.DistillPolicy(DummyOwner{}, testRedirects)
policy.Ready()
rule1MapStateEntry := newAllowEntryWithLabels(ruleLabel)
allowEgressMapStateEntry := newAllowEntryWithLabels(ruleLabelAllowAnyEgress)
expectedEndpointPolicy := EndpointPolicy{
Redirects: testRedirects,
selectorPolicy: &selectorPolicy{
Revision: repo.GetRevision(),
SelectorCache: repo.GetSelectorCache(),
L4Policy: L4Policy{
Revision: repo.GetRevision(),
Ingress: L4DirectionPolicy{PortRules: NewL4PolicyMapWithValues(map[string]*L4Filter{
"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeNone,
Ingress: true,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {ruleLabel}}),
},
})},
Egress: newL4DirectionPolicy(),
},
IngressPolicyEnabled: true,
EgressPolicyEnabled: false,
},
PolicyOwner: DummyOwner{},
policyMapState: emptyMapState().withState(mapStateMap{
EgressKey(): allowEgressMapStateEntry,
IngressKey().WithTCPPort(80): rule1MapStateEntry,
}),
}
// Add new identity to test accumulation of MapChanges
added1 := identity.IdentityMap{
identity.NumericIdentity(192): labels.ParseSelectLabelArray("id=resolve_test_1"),
}
wg := &sync.WaitGroup{}
td.sc.UpdateIdentities(added1, nil, wg)
wg.Wait()
require.Empty(t, policy.policyMapChanges.synced) // XXX why 0?
// Have to remove circular reference before testing to avoid an infinite loop
policy.selectorPolicy.Detach()
// Assign an empty mutex so that checker.Equal does not complain about the
// difference of the internal time.Time from the lock_debug.go.
policy.selectorPolicy.L4Policy.mutex = lock.RWMutex{}
policy.policyMapChanges.mutex = lock.Mutex{}
policy.policyMapChanges.firstVersion = 0
// policyMapState cannot be compared via DeepEqual
require.Truef(t, policy.policyMapState.Equal(&expectedEndpointPolicy.policyMapState), policy.policyMapState.diff(&expectedEndpointPolicy.policyMapState))
policy.policyMapState = mapState{}
expectedEndpointPolicy.policyMapState = mapState{}
require.Equal(t, &expectedEndpointPolicy, policy)
}
func TestMapStateWithIngress(t *testing.T) {
td := newTestData()
repo := td.repo
td.bootstrapRepo(GenerateL3IngressRules, 1000, t)
ruleLabel := labels.ParseLabelArray("rule-world-allow-port-80")
ruleLabelAllowAnyEgress := labels.LabelArray{
labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyEgress, labels.LabelSourceReserved),
}
idFooSelectLabelArray := labels.ParseSelectLabelArray("id=foo")
idFooSelectLabels := labels.Labels{}
for _, lbl := range idFooSelectLabelArray {
idFooSelectLabels[lbl.Key] = lbl
}
fooIdentity := identity.NewIdentity(12345, idFooSelectLabels)
td.addIdentity(fooIdentity)
lblTest := labels.ParseLabel("id=resolve_test_1")
selFoo := api.NewESFromLabels(labels.ParseSelectLabel("id=foo"))
rule1 := api.Rule{
EndpointSelector: selFoo,
Labels: ruleLabel,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEntities: []api.Entity{api.EntityWorld},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{},
}},
},
{
Authentication: &api.Authentication{
Mode: api.AuthenticationModeDisabled,
},
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{
api.NewESFromLabels(lblTest),
},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{},
}},
},
},
}
rule1.Sanitize()
_, _, err := repo.mustAdd(rule1)
require.NoError(t, err)
repo.mutex.RLock()
defer repo.mutex.RUnlock()
selPolicy, err := repo.resolvePolicyLocked(fooIdentity)
require.NoError(t, err)
policy := selPolicy.DistillPolicy(DummyOwner{}, testRedirects)
policy.Ready()
// Add new identity to test accumulation of MapChanges
added1 := identity.IdentityMap{
identity.NumericIdentity(192): labels.ParseSelectLabelArray("id=resolve_test_1", "num=1"),
identity.NumericIdentity(193): labels.ParseSelectLabelArray("id=resolve_test_1", "num=2"),
identity.NumericIdentity(194): labels.ParseSelectLabelArray("id=resolve_test_1", "num=3"),
}
wg := &sync.WaitGroup{}
td.sc.UpdateIdentities(added1, nil, wg)
wg.Wait()
require.Len(t, policy.policyMapChanges.synced, 3)
deleted1 := identity.IdentityMap{
identity.NumericIdentity(193): labels.ParseSelectLabelArray("id=resolve_test_1", "num=2"),
}
wg = &sync.WaitGroup{}
td.sc.UpdateIdentities(nil, deleted1, wg)
wg.Wait()
require.Len(t, policy.policyMapChanges.synced, 4)
cachedSelectorWorld := td.sc.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameWorld])
require.NotNil(t, cachedSelectorWorld)
cachedSelectorWorldV4 := td.sc.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameWorldIPv4])
require.NotNil(t, cachedSelectorWorldV4)
cachedSelectorWorldV6 := td.sc.FindCachedIdentitySelector(api.ReservedEndpointSelectors[labels.IDNameWorldIPv6])
require.NotNil(t, cachedSelectorWorldV6)
cachedSelectorTest := td.sc.FindCachedIdentitySelector(api.NewESFromLabels(lblTest))
require.NotNil(t, cachedSelectorTest)
rule1MapStateEntry := newAllowEntryWithLabels(ruleLabel)
allowEgressMapStateEntry := newAllowEntryWithLabels(ruleLabelAllowAnyEgress)
expectedEndpointPolicy := EndpointPolicy{
Redirects: testRedirects,
selectorPolicy: &selectorPolicy{
Revision: repo.GetRevision(),
SelectorCache: repo.GetSelectorCache(),
L4Policy: L4Policy{
Revision: repo.GetRevision(),
Ingress: L4DirectionPolicy{PortRules: NewL4PolicyMapWithValues(map[string]*L4Filter{
"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 0x6,
L7Parser: ParserTypeNone,
Ingress: true,
PerSelectorPolicies: L7DataMap{
cachedSelectorWorld: nil,
cachedSelectorWorldV4: nil,
cachedSelectorWorldV6: nil,
cachedSelectorTest: &PerSelectorPolicy{
Authentication: &api.Authentication{
Mode: api.AuthenticationModeDisabled,
},
CanShortCircuit: true,
},
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
cachedSelectorWorld: {ruleLabel},
cachedSelectorWorldV4: {ruleLabel},
cachedSelectorWorldV6: {ruleLabel},
cachedSelectorTest: {ruleLabel},
}),
},
}),
features: authRules,
},
Egress: newL4DirectionPolicy(),
},
IngressPolicyEnabled: true,
EgressPolicyEnabled: false,
},
PolicyOwner: DummyOwner{},
policyMapState: emptyMapState().withState(mapStateMap{
EgressKey(): allowEgressMapStateEntry,
IngressKey().WithIdentity(identity.ReservedIdentityWorld).WithTCPPort(80): rule1MapStateEntry,
IngressKey().WithIdentity(identity.ReservedIdentityWorldIPv4).WithTCPPort(80): rule1MapStateEntry,
IngressKey().WithIdentity(identity.ReservedIdentityWorldIPv6).WithTCPPort(80): rule1MapStateEntry,
IngressKey().WithIdentity(192).WithTCPPort(80): rule1MapStateEntry.withExplicitAuth(AuthTypeDisabled),
IngressKey().WithIdentity(194).WithTCPPort(80): rule1MapStateEntry.withExplicitAuth(AuthTypeDisabled),
}),
}
// Have to remove circular reference before testing for Equality to avoid an infinite loop
policy.selectorPolicy.Detach()
// Verify that cached selector is not found after Detach().
// Note that this depends on the other tests NOT using the same selector concurrently!
cachedSelectorTest = td.sc.FindCachedIdentitySelector(api.NewESFromLabels(lblTest))
require.Nil(t, cachedSelectorTest)
closer, changes := policy.ConsumeMapChanges()
closer()
// maps on the policy got cleared
require.Nil(t, policy.policyMapChanges.synced)
require.Equal(t, Keys{
ingressKey(192, 6, 80, 0): {},
ingressKey(194, 6, 80, 0): {},
}, changes.Adds)
require.Equal(t, Keys{
ingressKey(193, 6, 80, 0): {},
}, changes.Deletes)
// Assign an empty mutex so that the equality check does not complain about
// the difference in the internal time.Time from lock_debug.go.
policy.selectorPolicy.L4Policy.mutex = lock.RWMutex{}
policy.policyMapChanges.mutex = lock.Mutex{}
policy.policyMapChanges.firstVersion = 0
// policyMapState cannot be compared via DeepEqual
require.Truef(t, policy.policyMapState.Equal(&expectedEndpointPolicy.policyMapState), policy.policyMapState.diff(&expectedEndpointPolicy.policyMapState))
require.EqualExportedValues(t, &expectedEndpointPolicy, policy)
}
// allowsIdentity returns whether the endpoint policy allows ingress and
// egress traffic for the specified numeric security identity.
// If 'identity' is zero, it checks whether all traffic is allowed.
//
// A true return value indicates that traffic is allowed in that direction.
func (p *EndpointPolicy) allowsIdentity(identity identity.NumericIdentity) (ingress, egress bool) {
if !p.IngressPolicyEnabled {
ingress = true
} else {
key := IngressKey().WithIdentity(identity)
if v, exists := p.policyMapState.Get(key); exists && !v.IsDeny() {
ingress = true
}
}
if !p.EgressPolicyEnabled {
egress = true
} else {
key := EgressKey().WithIdentity(identity)
if v, exists := p.policyMapState.Get(key); exists && !v.IsDeny() {
egress = true
}
}
return ingress, egress
}
func TestEndpointPolicy_AllowsIdentity(t *testing.T) {
type fields struct {
selectorPolicy *selectorPolicy
PolicyMapState mapState
}
type args struct {
identity identity.NumericIdentity
}
tests := []struct {
name string
fields fields
args args
wantIngress bool
wantEgress bool
}{
{
name: "policy disabled",
fields: fields{
selectorPolicy: &selectorPolicy{
IngressPolicyEnabled: false,
EgressPolicyEnabled: false,
},
PolicyMapState: emptyMapState(),
},
args: args{
identity: 0,
},
wantIngress: true,
wantEgress: true,
},
{
name: "policy enabled",
fields: fields{
selectorPolicy: &selectorPolicy{
IngressPolicyEnabled: true,
EgressPolicyEnabled: true,
},
PolicyMapState: emptyMapState(),
},
args: args{
identity: 0,
},
wantIngress: false,
wantEgress: false,
},
{
name: "policy enabled for ingress",
fields: fields{
selectorPolicy: &selectorPolicy{
IngressPolicyEnabled: true,
EgressPolicyEnabled: true,
},
PolicyMapState: emptyMapState().withState(mapStateMap{
IngressKey(): {},
}),
},
args: args{
identity: 0,
},
wantIngress: true,
wantEgress: false,
},
{
name: "policy enabled for egress",
fields: fields{
selectorPolicy: &selectorPolicy{
IngressPolicyEnabled: true,
EgressPolicyEnabled: true,
},
PolicyMapState: emptyMapState().withState(mapStateMap{
EgressKey(): {},
}),
},
args: args{
identity: 0,
},
wantIngress: false,
wantEgress: true,
},
{
name: "policy enabled for ingress with deny policy",
fields: fields{
selectorPolicy: &selectorPolicy{
IngressPolicyEnabled: true,
EgressPolicyEnabled: true,
},
PolicyMapState: emptyMapState().withState(mapStateMap{
IngressKey(): NewMapStateEntry(DenyEntry),
}),
},
args: args{
identity: 0,
},
wantIngress: false,
wantEgress: false,
},
{
name: "policy disabled for ingress with deny policy",
fields: fields{
selectorPolicy: &selectorPolicy{
IngressPolicyEnabled: false,
EgressPolicyEnabled: true,
},
PolicyMapState: emptyMapState().withState(mapStateMap{
IngressKey(): NewMapStateEntry(DenyEntry),
}),
},
args: args{
identity: 0,
},
wantIngress: true,
wantEgress: false,
},
{
name: "policy enabled for egress with deny policy",
fields: fields{
selectorPolicy: &selectorPolicy{
IngressPolicyEnabled: true,
EgressPolicyEnabled: true,
},
PolicyMapState: emptyMapState().withState(mapStateMap{
EgressKey(): NewMapStateEntry(DenyEntry),
}),
},
args: args{
identity: 0,
},
wantIngress: false,
wantEgress: false,
},
{
name: "policy disabled for egress with deny policy",
fields: fields{
selectorPolicy: &selectorPolicy{
IngressPolicyEnabled: true,
EgressPolicyEnabled: false,
},
PolicyMapState: emptyMapState().withState(mapStateMap{
EgressKey(): NewMapStateEntry(DenyEntry),
}),
},
args: args{
identity: 0,
},
wantIngress: false,
wantEgress: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p := &EndpointPolicy{
selectorPolicy: tt.fields.selectorPolicy,
policyMapState: tt.fields.PolicyMapState,
}
gotIngress, gotEgress := p.allowsIdentity(tt.args.identity)
if gotIngress != tt.wantIngress {
t.Errorf("allowsIdentity() gotIngress = %v, want %v", gotIngress, tt.wantIngress)
}
if gotEgress != tt.wantEgress {
t.Errorf("allowsIdentity() gotEgress = %v, want %v", gotEgress, tt.wantEgress)
}
})
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"fmt"
"strconv"
"strings"
"github.com/cilium/proxy/pkg/policy/api/kafka"
"github.com/cilium/cilium/pkg/container/versioned"
"github.com/cilium/cilium/pkg/identity"
ipcachetypes "github.com/cilium/cilium/pkg/ipcache/types"
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy/api"
policytypes "github.com/cilium/cilium/pkg/policy/types"
)
// ruleKey is a synthetic unique identifier for a Rule
type ruleKey struct {
// resource is the owning resource of this rule
resource ipcachetypes.ResourceID
// idx is an arbitrary unique index, as resources can own multiple rules
idx uint
}
type rule struct {
api.Rule
key ruleKey
// subjectSelector is the entry in the SelectorCache that selects subjects (endpoints or nodes).
subjectSelector CachedSelector
}
// IdentitySelectionUpdated is called by the SelectorCache when the set of
// identities selected by the subject selector changes. We can ignore it here
// because the endpoint will be regenerated as a consequence of the identity
// update itself.
func (r *rule) IdentitySelectionUpdated(_ policytypes.CachedSelector, _, _ []identity.NumericIdentity) {
}
func (r *rule) IdentitySelectionCommit(*versioned.Tx) {
}
func (r *rule) IsPeerSelector() bool {
return false
}
func (r *rule) String() string {
return r.EndpointSelector.String()
}
func (r *rule) getSelector() *api.EndpointSelector {
if r.NodeSelector.LabelSelector != nil {
return &r.NodeSelector
}
return &r.EndpointSelector
}
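// appendL7WildcardRule appends a parser-specific L7 wildcard rule to the
// receiver's existing L7 rules. It is used when merging with a rule that has
// no L7 restrictions, so that the merged result still redirects to the proxy
// but allows all traffic at L7.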
func (epd *PerSelectorPolicy) appendL7WildcardRule(ctx *SearchContext) api.L7Rules {
// Wildcard rule only needs to be appended if some rules already exist
switch {
case len(epd.L7Rules.HTTP) > 0:
rule := api.PortRuleHTTP{}
if !rule.Exists(epd.L7Rules) {
ctx.PolicyTrace(" Merging HTTP wildcard rule: %+v\n", rule)
epd.L7Rules.HTTP = append(epd.L7Rules.HTTP, rule)
} else {
ctx.PolicyTrace(" Merging HTTP wildcard rule, equal rule already exists: %+v\n", rule)
}
case len(epd.L7Rules.Kafka) > 0:
rule := kafka.PortRule{}
rule.Sanitize()
if !rule.Exists(epd.L7Rules.Kafka) {
ctx.PolicyTrace(" Merging Kafka wildcard rule: %+v\n", rule)
epd.L7Rules.Kafka = append(epd.L7Rules.Kafka, rule)
} else {
ctx.PolicyTrace(" Merging Kafka wildcard rule, equal rule already exists: %+v\n", rule)
}
case len(epd.L7Rules.DNS) > 0:
// Wildcarding at L7 for DNS is expressed by allowing everything via
// MatchPattern.
rule := api.PortRuleDNS{MatchPattern: "*"}
rule.Sanitize()
if !rule.Exists(epd.L7Rules) {
ctx.PolicyTrace(" Merging DNS wildcard rule: %+v\n", rule)
epd.L7Rules.DNS = append(epd.L7Rules.DNS, rule)
} else {
ctx.PolicyTrace(" Merging DNS wildcard rule, equal rule already exists: %+v\n", rule)
}
case epd.L7Rules.L7Proto != "" && len(epd.L7Rules.L7) > 0:
rule := api.PortRuleL7{}
if !rule.Exists(epd.L7Rules) {
ctx.PolicyTrace(" Merging L7 wildcard rule: %+v\n", rule)
epd.L7Rules.L7 = append(epd.L7Rules.L7, rule)
} else {
ctx.PolicyTrace(" Merging L7 wildcard rule, equal rule already exists: %+v\n", rule)
}
}
return epd.L7Rules
}
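// l7WildcardRuleExamples is a minimal illustrative sketch (not used by the
// policy code) of the per-parser wildcard rules appended above: empty HTTP,
// Kafka and L7 rules match everything, while DNS wildcards via MatchPattern.
func l7WildcardRuleExamples() (api.PortRuleHTTP, kafka.PortRule, api.PortRuleDNS) {
	// An empty HTTP rule matches any request; an empty Kafka rule matches any
	// topic/API key; DNS needs an explicit MatchPattern to allow all names.
	return api.PortRuleHTTP{}, kafka.PortRule{}, api.PortRuleDNS{MatchPattern: "*"}
}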
// takesListenerPrecedenceOver returns true if the listener reference in 'l7Rules' takes precedence
// over the listener reference in 'other'.
func (l7Rules *PerSelectorPolicy) takesListenerPrecedenceOver(other *PerSelectorPolicy) bool {
var priority, otherPriority uint8
// Decrement by one so that the undefined value (0) wraps around to the highest
// numerical uint8 value, which is the lowest possible priority.
priority = l7Rules.Priority - 1
otherPriority = other.Priority - 1
return priority < otherPriority
}
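// listenerPriorityExample is a minimal, hypothetical sketch (not used by the
// policy code) of the wrap-around above: the undefined priority (0) decrements
// to the highest uint8 value and therefore loses to any explicitly set priority.
// The listener names are illustrative only.
func listenerPriorityExample() bool {
	explicit := &PerSelectorPolicy{Listener: "cec/listener-a", Priority: 1}
	undefined := &PerSelectorPolicy{Listener: "cec/listener-b"}
	// explicit: 1-1 = 0, undefined: 0-1 = 255, so the explicit priority wins.
	return explicit.takesListenerPrecedenceOver(undefined) // true
}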
// mergeListenerReference merges listener reference from 'newL7Rules' to 'l7Rules', giving
// precedence to listener with the lowest priority, if any.
func (l7Rules *PerSelectorPolicy) mergeListenerReference(newL7Rules *PerSelectorPolicy) error {
// Nothing to do if 'newL7Rules' has no listener reference
if newL7Rules.Listener == "" {
return nil
}
// Nothing to do if the listeners are already the same and have the same priority
if newL7Rules.Listener == l7Rules.Listener && l7Rules.Priority == newL7Rules.Priority {
return nil
}
// Nothing to do if 'l7Rules' takes precedence
if l7Rules.takesListenerPrecedenceOver(newL7Rules) {
return nil
}
// override if 'l7Rules' has no listener or 'newL7Rules' takes precedence
if l7Rules.Listener == "" || newL7Rules.takesListenerPrecedenceOver(l7Rules) {
l7Rules.Listener = newL7Rules.Listener
l7Rules.Priority = newL7Rules.Priority
return nil
}
// otherwise error on conflict
return fmt.Errorf("cannot merge conflicting CiliumEnvoyConfig Listeners (%v/%v) with the same priority (%d)", newL7Rules.Listener, l7Rules.Listener, l7Rules.Priority)
}
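// mergeListenerReferenceExample is a hypothetical sketch (not used by the
// policy code) of the precedence rules above: the listener with the lower
// priority value is kept, and two different listeners with the same priority
// conflict. The listener names are illustrative only.
func mergeListenerReferenceExample() error {
	existing := &PerSelectorPolicy{Listener: "cec/low", Priority: 1}
	// The higher priority value loses; 'existing' keeps its own listener.
	if err := existing.mergeListenerReference(&PerSelectorPolicy{Listener: "cec/high", Priority: 2}); err != nil {
		return err
	}
	// Same priority but a different listener name: this returns a merge error.
	return existing.mergeListenerReference(&PerSelectorPolicy{Listener: "cec/other", Priority: 1})
}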
func mergePortProto(ctx *SearchContext, existingFilter, filterToMerge *L4Filter, selectorCache *SelectorCache) (err error) {
// Merge the L7-related data from the filter to merge
// with the L7-related data already in the existing filter.
existingFilter.L7Parser, err = existingFilter.L7Parser.Merge(filterToMerge.L7Parser)
if err != nil {
ctx.PolicyTrace(" Merge conflict: mismatching parsers %s/%s\n", filterToMerge.L7Parser, existingFilter.L7Parser)
return err
}
for cs, newL7Rules := range filterToMerge.PerSelectorPolicies {
// 'cs' will be merged or moved (see below), either way it needs
// to be removed from the map it is in now.
delete(filterToMerge.PerSelectorPolicies, cs)
if l7Rules, ok := existingFilter.PerSelectorPolicies[cs]; ok {
// existing filter already has 'cs', release and merge L7 rules
selectorCache.RemoveSelector(cs, filterToMerge)
// skip merging for reserved:none, as it is never
// selected, and toFQDN rules currently translate to
// reserved:none as an endpoint selector, causing a
// merge conflict for different toFQDN destinations
// with different TLS contexts.
if cs.IsNone() {
continue
}
if l7Rules.Equal(newL7Rules) {
continue // identical rules need no merging
}
// Merge two non-identical sets of non-nil rules
if l7Rules != nil && l7Rules.IsDeny {
// If the existing rule is a deny then this is a no-op.
// Deny takes priority over any other rule.
continue
} else if newL7Rules != nil && newL7Rules.IsDeny {
// Overwrite the existing filter if the new rule is a deny.
// Deny takes priority over any other rule.
existingFilter.PerSelectorPolicies[cs] = newL7Rules
continue
}
// One of the rules may be a nil rule, expand it to an empty non-nil rule
if l7Rules == nil {
l7Rules = &PerSelectorPolicy{}
}
if newL7Rules == nil {
newL7Rules = &PerSelectorPolicy{}
}
// Merge isRedirect flag
l7Rules.isRedirect = l7Rules.isRedirect || newL7Rules.isRedirect
// Merge listener reference
if err := l7Rules.mergeListenerReference(newL7Rules); err != nil {
ctx.PolicyTrace(" Merge conflict: %s\n", err.Error())
return err
}
if l7Rules.Authentication == nil || newL7Rules.Authentication == nil {
if newL7Rules.Authentication != nil {
l7Rules.Authentication = newL7Rules.Authentication
}
} else if !newL7Rules.Authentication.DeepEqual(l7Rules.Authentication) {
ctx.PolicyTrace(" Merge conflict: mismatching auth types %s/%s\n", newL7Rules.Authentication.Mode, l7Rules.Authentication.Mode)
return fmt.Errorf("cannot merge conflicting authentication types (%s/%s)", newL7Rules.Authentication.Mode, l7Rules.Authentication.Mode)
}
if l7Rules.TerminatingTLS == nil || newL7Rules.TerminatingTLS == nil {
if newL7Rules.TerminatingTLS != nil {
l7Rules.TerminatingTLS = newL7Rules.TerminatingTLS
}
} else if !newL7Rules.TerminatingTLS.Equal(l7Rules.TerminatingTLS) {
ctx.PolicyTrace(" Merge conflict: mismatching terminating TLS contexts %v/%v\n", newL7Rules.TerminatingTLS, l7Rules.TerminatingTLS)
return fmt.Errorf("cannot merge conflicting terminating TLS contexts for cached selector %s: (%v/%v)", cs.String(), newL7Rules.TerminatingTLS, l7Rules.TerminatingTLS)
}
if l7Rules.OriginatingTLS == nil || newL7Rules.OriginatingTLS == nil {
if newL7Rules.OriginatingTLS != nil {
l7Rules.OriginatingTLS = newL7Rules.OriginatingTLS
}
} else if !newL7Rules.OriginatingTLS.Equal(l7Rules.OriginatingTLS) {
ctx.PolicyTrace(" Merge conflict: mismatching originating TLS contexts %v/%v\n", newL7Rules.OriginatingTLS, l7Rules.OriginatingTLS)
return fmt.Errorf("cannot merge conflicting originating TLS contexts for cached selector %s: (%v/%v)", cs.String(), newL7Rules.OriginatingTLS, l7Rules.OriginatingTLS)
}
// For now we simply merge the set of allowed SNIs from different rules
// to/from the *same remote*, port, and protocol. This means that if any
// rule requires SNI, then all traffic to that remote/port requires TLS,
// even if other merged rules would be fine without TLS. Any SNI from all
// applicable rules is allowed.
//
// Preferably we could allow different rules for each SNI, but for now the
// combination of all L7 rules is allowed for all the SNIs. For example, when
// SNI and TLS termination are used together so that L7 filtering is possible:
//
// - existing: SNI: public.example.com
// - new: SNI: private.example.com HTTP: path="/public"
//
// Separately, these rules allow access to all paths at SNI
// public.example.com and to path "/public" at private.example.com, but
// currently we also allow all paths at private.example.com. This may be
// clamped down if there is sufficient demand for SNI and TLS termination
// together.
//
// Note however that SNI rules are typically used with `toFQDNs`, each of
// which defines a separate destination, so that SNIs for different
// `toFQDNs` will not be merged together.
l7Rules.ServerNames = l7Rules.ServerNames.Merge(newL7Rules.ServerNames)
// L7 rules can be applied with SNI filtering only if the TLS is also
// terminated
if len(l7Rules.ServerNames) > 0 && !l7Rules.L7Rules.IsEmpty() && l7Rules.TerminatingTLS == nil {
ctx.PolicyTrace(" Merge conflict: cannot use SNI filtering with L7 rules without TLS termination: %v\n", l7Rules.ServerNames)
return fmt.Errorf("cannot merge L7 rules for cached selector %s with SNI filtering without TLS termination: %v", cs.String(), l7Rules.ServerNames)
}
// Empty L7 rules effectively wildcard L7. When merging with a non-empty
// rule, the empty set must be expanded to an actual wildcard rule for the
// specific L7 protocol.
if !l7Rules.HasL7Rules() && newL7Rules.HasL7Rules() {
l7Rules.L7Rules = newL7Rules.appendL7WildcardRule(ctx)
existingFilter.PerSelectorPolicies[cs] = l7Rules
continue
}
if l7Rules.HasL7Rules() && !newL7Rules.HasL7Rules() {
l7Rules.appendL7WildcardRule(ctx)
existingFilter.PerSelectorPolicies[cs] = l7Rules
continue
}
// We already know from the L7Parser.Merge() above that there are no
// conflicting parser types, and rule validation only allows one type of L7
// rules in a rule, so we can just merge the rules here.
for _, newRule := range newL7Rules.HTTP {
if !newRule.Exists(l7Rules.L7Rules) {
l7Rules.HTTP = append(l7Rules.HTTP, newRule)
}
}
for _, newRule := range newL7Rules.Kafka {
if !newRule.Exists(l7Rules.L7Rules.Kafka) {
l7Rules.Kafka = append(l7Rules.Kafka, newRule)
}
}
if l7Rules.L7Proto == "" && newL7Rules.L7Proto != "" {
l7Rules.L7Proto = newL7Rules.L7Proto
}
for _, newRule := range newL7Rules.L7 {
if !newRule.Exists(l7Rules.L7Rules) {
l7Rules.L7 = append(l7Rules.L7, newRule)
}
}
for _, newRule := range newL7Rules.DNS {
if !newRule.Exists(l7Rules.L7Rules) {
l7Rules.DNS = append(l7Rules.DNS, newRule)
}
}
// Update the pointer in the map in case it was newly allocated
existingFilter.PerSelectorPolicies[cs] = l7Rules
} else { // 'cs' is not in the existing filter yet
// Update selector owner to the existing filter
selectorCache.ChangeUser(cs, filterToMerge, existingFilter)
// Move L7 rules over.
existingFilter.PerSelectorPolicies[cs] = newL7Rules
if cs.IsWildcard() {
existingFilter.wildcard = cs
}
}
}
return nil
}
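// mergeDenyPrecedenceSketch is a hypothetical, simplified sketch of the
// deny-wins logic applied in mergePortProto above: when either per-selector
// policy is a deny, the deny entry is kept and no L7 merging takes place.
func mergeDenyPrecedenceSketch(existing, incoming *PerSelectorPolicy) *PerSelectorPolicy {
	if existing != nil && existing.IsDeny {
		// An existing deny is never overridden by an allow.
		return existing
	}
	if incoming != nil && incoming.IsDeny {
		// A new deny overwrites an existing allow.
		return incoming
	}
	return existing
}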
// mergeIngressPortProto merges all rules which share the same port & protocol that
// select a given set of endpoints. It updates the L4Filter mapped to by the specified
// port and protocol with the contents of the provided PortRule. If the rule
// being merged has conflicting L7 rules with those already in the provided
// L4PolicyMap for the specified port-protocol tuple, it returns an error.
//
// If any rules contain L7 rules that select Host or Remote Node and we should
// accept all traffic from host, the L7 rules will be translated into L7
// wildcards via 'hostWildcardL7'. That is to say, traffic will be
// forwarded to the proxy for endpoints matching those labels, but the proxy
// will allow all such traffic.
func mergeIngressPortProto(policyCtx PolicyContext, ctx *SearchContext, endpoints api.EndpointSelectorSlice, auth *api.Authentication, hostWildcardL7 []string,
r api.Ports, p api.PortProtocol, proto api.L4Proto, ruleLabels stringLabels, resMap L4PolicyMap) (int, error) {
// Create a new L4Filter
filterToMerge, err := createL4IngressFilter(policyCtx, endpoints, auth, hostWildcardL7, r, p, proto, ruleLabels)
if err != nil {
return 0, err
}
err = addL4Filter(policyCtx, ctx, resMap, p, proto, filterToMerge)
if err != nil {
return 0, err
}
return 1, err
}
func traceL3(ctx *SearchContext, peerEndpoints api.EndpointSelectorSlice, direction string, isDeny bool) {
var result strings.Builder
// Requirements will be cloned into every selector, so only trace them once.
if len(peerEndpoints[0].MatchExpressions) > 0 {
sel := peerEndpoints[0]
result.WriteString(" Enforcing requirements ")
result.WriteString(fmt.Sprintf("%+v", sel.MatchExpressions))
result.WriteString("\n")
}
// EndpointSelector
for _, sel := range peerEndpoints {
if len(sel.MatchLabels) > 0 {
if !isDeny {
result.WriteString(" Allows ")
} else {
result.WriteString(" Denies ")
}
result.WriteString(direction)
result.WriteString(" labels ")
result.WriteString(sel.String())
result.WriteString("\n")
}
}
ctx.PolicyTrace(result.String()) //nolint:govet
}
// rulePortsCoverSearchContext determines whether the L4 portion of the rule
// covers the destination ports specified in the search context.
//
// Returns true if the context specifies no destination ports, or if the rule's
// ports match any of them.
func rulePortsCoverSearchContext(ports []api.PortProtocol, ctx *SearchContext) bool {
if len(ctx.DPorts) == 0 {
return true
}
for _, p := range ports {
for _, dp := range ctx.DPorts {
tracePort := api.PortProtocol{
Protocol: api.L4Proto(dp.Protocol),
}
if dp.Name != "" {
tracePort.Port = dp.Name
} else {
tracePort.Port = strconv.FormatUint(uint64(dp.Port), 10)
}
if p.Covers(tracePort) {
return true
}
}
}
return false
}
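// portCoverageExample is a minimal, hypothetical sketch (not used by the
// policy code) of the Covers() check above: the destination port from the
// search context is converted into a PortProtocol and matched against the
// rule's ports.
func portCoverageExample() bool {
	rulePort := api.PortProtocol{Port: "80", Protocol: api.ProtoTCP}
	tracePort := api.PortProtocol{Port: strconv.FormatUint(80, 10), Protocol: api.ProtoTCP}
	// An identical port/protocol pair is covered by the rule.
	return rulePort.Covers(tracePort)
}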
func mergeIngress(policyCtx PolicyContext, ctx *SearchContext, fromEndpoints api.EndpointSelectorSlice, auth *api.Authentication, toPorts, icmp api.PortsIterator, ruleLabels stringLabels, resMap L4PolicyMap) (int, error) {
found := 0
// short-circuit if no endpoint is selected
if fromEndpoints == nil {
return found, nil
}
if ctx.From != nil && len(fromEndpoints) > 0 {
if ctx.TraceEnabled() {
traceL3(ctx, fromEndpoints, "from", policyCtx.IsDeny())
}
if !fromEndpoints.Matches(ctx.From) {
ctx.PolicyTrace(" No label match for %s", ctx.From)
return 0, nil
}
ctx.PolicyTrace(" Found all required labels")
}
// Daemon options may induce L3 allows for host/world. In this case, if
// we find any L7 rules matching host/world then we need to turn any L7
// restrictions on these endpoints into L7 allow-all so that the
// traffic is always allowed, but is also always redirected through the
// proxy
hostWildcardL7 := make([]string, 0, 2)
if option.Config.AlwaysAllowLocalhost() {
hostWildcardL7 = append(hostWildcardL7, labels.IDNameHost)
}
var (
cnt int
err error
)
// L3-only rule (with requirements folded into fromEndpoints).
if toPorts.Len() == 0 && icmp.Len() == 0 && len(fromEndpoints) > 0 {
cnt, err = mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, &api.PortRule{}, api.PortProtocol{Port: "0", Protocol: api.ProtoAny}, api.ProtoAny, ruleLabels, resMap)
if err != nil {
return found, err
}
}
found += cnt
err = toPorts.Iterate(func(r api.Ports) error {
// For L4 Policy, an empty slice of EndpointSelector indicates that the
// rule allows all at L3 - explicitly specify this by creating a slice
// with the WildcardEndpointSelector.
if len(fromEndpoints) == 0 {
fromEndpoints = api.EndpointSelectorSlice{api.WildcardEndpointSelector}
}
if !policyCtx.IsDeny() {
ctx.PolicyTrace(" Allows port %v\n", r.GetPortProtocols())
} else {
ctx.PolicyTrace(" Denies port %v\n", r.GetPortProtocols())
}
if !rulePortsCoverSearchContext(r.GetPortProtocols(), ctx) {
ctx.PolicyTrace(" No port match found\n")
return nil
}
pr := r.GetPortRule()
if pr != nil {
if pr.Rules != nil && pr.Rules.L7Proto != "" {
ctx.PolicyTrace(" l7proto: \"%s\"\n", pr.Rules.L7Proto)
}
if !pr.Rules.IsEmpty() {
for _, l7 := range pr.Rules.HTTP {
ctx.PolicyTrace(" %+v\n", l7)
}
for _, l7 := range pr.Rules.Kafka {
ctx.PolicyTrace(" %+v\n", l7)
}
for _, l7 := range pr.Rules.L7 {
ctx.PolicyTrace(" %+v\n", l7)
}
}
}
for _, p := range r.GetPortProtocols() {
if p.Protocol.IsAny() {
cnt, err := mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, api.ProtoTCP, ruleLabels, resMap)
if err != nil {
return err
}
found += cnt
cnt, err = mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, api.ProtoUDP, ruleLabels, resMap)
if err != nil {
return err
}
found += cnt
cnt, err = mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, api.ProtoSCTP, ruleLabels, resMap)
if err != nil {
return err
}
found += cnt
} else {
cnt, err := mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, p.Protocol, ruleLabels, resMap)
if err != nil {
return err
}
found += cnt
}
}
return nil
})
if err != nil {
return found, err
}
err = icmp.Iterate(func(r api.Ports) error {
if len(fromEndpoints) == 0 {
fromEndpoints = api.EndpointSelectorSlice{api.WildcardEndpointSelector}
}
if !policyCtx.IsDeny() {
ctx.PolicyTrace(" Allows ICMP type %v\n", r.GetPortProtocols())
} else {
ctx.PolicyTrace(" Denies ICMP type %v\n", r.GetPortProtocols())
}
if !rulePortsCoverSearchContext(r.GetPortProtocols(), ctx) {
ctx.PolicyTrace(" No ICMP type match found\n")
return nil
}
for _, p := range r.GetPortProtocols() {
cnt, err := mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, p.Protocol, ruleLabels, resMap)
if err != nil {
return err
}
found += cnt
}
return nil
})
return found, err
}
func (state *traceState) selectRule(ctx *SearchContext, r *rule) {
ctx.PolicyTrace("* Rule %s: selected\n", r)
state.selectedRules++
}
func (state *traceState) unSelectRule(ctx *SearchContext, labels labels.LabelArray, r *rule) {
ctx.PolicyTraceVerbose(" Rule %s: did not select %+v\n", r, labels)
}
// resolveIngressPolicy analyzes the rule against the given SearchContext, and
// merges it with any prior-generated policy within the provided L4PolicyMap.
// Requirements based on all ingress requirements (set in FromRequires) in
// other rules are stored in the specified slice of LabelSelectorRequirement.
// These requirements are dynamically inserted into a copy of the receiver rule,
// as requirements form conjunctions across all rules.
func (r *rule) resolveIngressPolicy(
policyCtx PolicyContext,
ctx *SearchContext,
state *traceState,
result L4PolicyMap,
requirements, requirementsDeny []slim_metav1.LabelSelectorRequirement,
) (
L4PolicyMap, error,
) {
if !ctx.rulesSelect {
if !r.getSelector().Matches(ctx.To) {
state.unSelectRule(ctx, ctx.To, r)
return nil, nil
}
}
state.selectRule(ctx, r)
found, foundDeny := 0, 0
if len(r.Ingress) == 0 && len(r.IngressDeny) == 0 {
ctx.PolicyTrace(" No ingress rules\n")
}
for _, ingressRule := range r.Ingress {
fromEndpoints := ingressRule.GetSourceEndpointSelectorsWithRequirements(requirements)
cnt, err := mergeIngress(policyCtx, ctx, fromEndpoints, ingressRule.Authentication, ingressRule.ToPorts, ingressRule.ICMPs, makeStringLabels(r.Rule.Labels), result)
if err != nil {
return nil, err
}
if cnt > 0 {
found += cnt
}
}
oldDeny := policyCtx.SetDeny(true)
defer func() {
policyCtx.SetDeny(oldDeny)
}()
for _, ingressRule := range r.IngressDeny {
fromEndpoints := ingressRule.GetSourceEndpointSelectorsWithRequirements(requirementsDeny)
cnt, err := mergeIngress(policyCtx, ctx, fromEndpoints, nil, ingressRule.ToPorts, ingressRule.ICMPs, makeStringLabels(r.Rule.Labels), result)
if err != nil {
return nil, err
}
if cnt > 0 {
foundDeny += cnt
}
}
if found+foundDeny > 0 {
if found != 0 {
state.matchedRules++
}
if foundDeny != 0 {
state.matchedDenyRules++
}
return result, nil
}
return nil, nil
}
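// requirementsFoldingSketch is a hypothetical sketch (not part of the policy
// code) of how FromRequires-derived requirements are folded into the source
// selectors used by resolveIngressPolicy above. The requirement key and values
// are illustrative only.
func requirementsFoldingSketch(ingress api.IngressRule) api.EndpointSelectorSlice {
	requirements := []slim_metav1.LabelSelectorRequirement{{
		Key:      "role", // hypothetical requirement key
		Operator: slim_metav1.LabelSelectorOpIn,
		Values:   []string{"frontend"},
	}}
	// Each returned source selector has the requirement conjoined to it.
	return ingress.GetSourceEndpointSelectorsWithRequirements(requirements)
}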
func (r *rule) matchesSubject(securityIdentity *identity.Identity) bool {
subjectIsNode := securityIdentity.ID == identity.ReservedIdentityHost
ruleSelectsNode := r.NodeSelector.LabelSelector != nil
// Short-circuit if the rule's selector type (node vs. endpoint) does not match the
// identity's type
if ruleSelectsNode != subjectIsNode {
return false
}
// Fall back to explicit label matching for the local node,
// because the local node has mutable labels which are applied asynchronously to the SelectorCache.
if r.subjectSelector == nil || ruleSelectsNode {
return r.getSelector().Matches(securityIdentity.LabelArray)
}
return r.subjectSelector.Selects(versioned.Latest(), securityIdentity.ID)
}
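// getSubjects returns the numeric identities currently selected as subjects
// of this rule. A node-selector rule only ever applies to the reserved host
// identity.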
func (r *rule) getSubjects() []identity.NumericIdentity {
if r.NodeSelector.LabelSelector != nil {
return []identity.NumericIdentity{identity.ReservedIdentityHost}
}
return r.subjectSelector.GetSelections(versioned.Latest())
}
// ****************** EGRESS POLICY ******************
func mergeEgress(policyCtx PolicyContext, ctx *SearchContext, toEndpoints api.EndpointSelectorSlice, auth *api.Authentication, toPorts, icmp api.PortsIterator, ruleLabels stringLabels, resMap L4PolicyMap, fqdns api.FQDNSelectorSlice) (int, error) {
found := 0
// short-circuit if no endpoint is selected
if toEndpoints == nil {
return found, nil
}
if ctx.To != nil && len(toEndpoints) > 0 {
if ctx.TraceEnabled() {
traceL3(ctx, toEndpoints, "to", policyCtx.IsDeny())
}
if !toEndpoints.Matches(ctx.To) {
ctx.PolicyTrace(" No label match for %s", ctx.To)
return 0, nil
}
ctx.PolicyTrace(" Found all required labels")
}
var (
cnt int
err error
)
// L3-only rule (with requirements folded into toEndpoints).
if toPorts.Len() == 0 && icmp.Len() == 0 && len(toEndpoints) > 0 {
cnt, err = mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, &api.PortRule{}, api.PortProtocol{Port: "0", Protocol: api.ProtoAny}, api.ProtoAny, ruleLabels, resMap, fqdns)
if err != nil {
return found, err
}
}
found += cnt
err = toPorts.Iterate(func(r api.Ports) error {
// For L4 Policy, an empty slice of EndpointSelector indicates that the
// rule allows all at L3 - explicitly specify this by creating a slice
// with the WildcardEndpointSelector.
if len(toEndpoints) == 0 {
toEndpoints = api.EndpointSelectorSlice{api.WildcardEndpointSelector}
}
if !policyCtx.IsDeny() {
ctx.PolicyTrace(" Allows port %v\n", r.GetPortProtocols())
} else {
ctx.PolicyTrace(" Denies port %v\n", r.GetPortProtocols())
}
pr := r.GetPortRule()
if pr != nil {
if !pr.Rules.IsEmpty() {
for _, l7 := range pr.Rules.HTTP {
ctx.PolicyTrace(" %+v\n", l7)
}
for _, l7 := range pr.Rules.Kafka {
ctx.PolicyTrace(" %+v\n", l7)
}
for _, l7 := range pr.Rules.L7 {
ctx.PolicyTrace(" %+v\n", l7)
}
}
}
for _, p := range r.GetPortProtocols() {
if p.Protocol.IsAny() {
cnt, err := mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, api.ProtoTCP, ruleLabels, resMap, fqdns)
if err != nil {
return err
}
found += cnt
cnt, err = mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, api.ProtoUDP, ruleLabels, resMap, fqdns)
if err != nil {
return err
}
found += cnt
cnt, err = mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, api.ProtoSCTP, ruleLabels, resMap, fqdns)
if err != nil {
return err
}
found += cnt
} else {
cnt, err := mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, p.Protocol, ruleLabels, resMap, fqdns)
if err != nil {
return err
}
found += cnt
}
}
return nil
},
)
if err != nil {
return found, err
}
err = icmp.Iterate(func(r api.Ports) error {
if len(toEndpoints) == 0 {
toEndpoints = api.EndpointSelectorSlice{api.WildcardEndpointSelector}
}
if !policyCtx.IsDeny() {
ctx.PolicyTrace(" Allows ICMP type %v\n", r.GetPortProtocols())
} else {
ctx.PolicyTrace(" Denies ICMP type %v\n", r.GetPortProtocols())
}
for _, p := range r.GetPortProtocols() {
cnt, err := mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, p.Protocol, ruleLabels, resMap, fqdns)
if err != nil {
return err
}
found += cnt
}
return nil
})
return found, err
}
// mergeEgressPortProto merges all rules which share the same port & protocol that
// select a given set of endpoints. It updates the L4Filter mapped to by the specified
// port and protocol with the contents of the provided PortRule. If the rule
// being merged has conflicting L7 rules with those already in the provided
// L4PolicyMap for the specified port-protocol tuple, it returns an error.
func mergeEgressPortProto(policyCtx PolicyContext, ctx *SearchContext, endpoints api.EndpointSelectorSlice, auth *api.Authentication, r api.Ports, p api.PortProtocol,
proto api.L4Proto, ruleLabels stringLabels, resMap L4PolicyMap, fqdns api.FQDNSelectorSlice) (int, error) {
// Create a new L4Filter
filterToMerge, err := createL4EgressFilter(policyCtx, endpoints, auth, r, p, proto, ruleLabels, fqdns)
if err != nil {
return 0, err
}
err = addL4Filter(policyCtx, ctx, resMap, p, proto, filterToMerge)
if err != nil {
return 0, err
}
return 1, err
}
func (r *rule) resolveEgressPolicy(
policyCtx PolicyContext,
ctx *SearchContext,
state *traceState,
result L4PolicyMap,
requirements, requirementsDeny []slim_metav1.LabelSelectorRequirement,
) (
L4PolicyMap, error,
) {
if !ctx.rulesSelect {
if !r.getSelector().Matches(ctx.From) {
state.unSelectRule(ctx, ctx.From, r)
return nil, nil
}
}
state.selectRule(ctx, r)
found, foundDeny := 0, 0
if len(r.Egress) == 0 && len(r.EgressDeny) == 0 {
ctx.PolicyTrace(" No egress rules\n")
}
for _, egressRule := range r.Egress {
toEndpoints := egressRule.GetDestinationEndpointSelectorsWithRequirements(requirements)
cnt, err := mergeEgress(policyCtx, ctx, toEndpoints, egressRule.Authentication, egressRule.ToPorts, egressRule.ICMPs, makeStringLabels(r.Rule.Labels), result, egressRule.ToFQDNs)
if err != nil {
return nil, err
}
if cnt > 0 {
found += cnt
}
}
oldDeny := policyCtx.SetDeny(true)
defer func() {
policyCtx.SetDeny(oldDeny)
}()
for _, egressRule := range r.EgressDeny {
toEndpoints := egressRule.GetDestinationEndpointSelectorsWithRequirements(requirementsDeny)
cnt, err := mergeEgress(policyCtx, ctx, toEndpoints, nil, egressRule.ToPorts, egressRule.ICMPs, makeStringLabels(r.Rule.Labels), result, nil)
if err != nil {
return nil, err
}
if cnt > 0 {
foundDeny += cnt
}
}
if found+foundDeny > 0 {
if found != 0 {
state.matchedRules++
}
if foundDeny != 0 {
state.matchedDenyRules++
}
return result, nil
}
return nil, nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"fmt"
"strings"
"testing"
"github.com/cilium/proxy/pkg/policy/api/kafka"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/u8proto"
)
func TestL4Policy(t *testing.T) {
checkPolicy := func(expected, actual *L4Policy) {
t.Helper()
require.True(t, expected.Ingress.PortRules.TestingOnlyEquals(actual.Ingress.PortRules), expected.Ingress.PortRules.TestingOnlyDiff(actual.Ingress.PortRules))
require.True(t, expected.Egress.PortRules.TestingOnlyEquals(actual.Egress.PortRules), expected.Egress.PortRules.TestingOnlyDiff(actual.Egress.PortRules))
}
td := newTestData().withIDs(ruleTestIDs)
rule1 := api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
{Port: "8080", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
Egress: []api.EgressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "3000", Protocol: api.ProtoAny},
},
}},
},
},
}
td.repo.mustAdd(rule1)
l7rules := api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
}
l7map := L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: l7rules,
isRedirect: true,
},
}
expected := NewL4Policy(0)
expected.Ingress.PortRules.Upsert("80", 0, "TCP", &L4Filter{
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: "http", PerSelectorPolicies: l7map, Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
})
expected.Ingress.PortRules.Upsert("8080", 0, "TCP", &L4Filter{
Port: 8080, Protocol: api.ProtoTCP, U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: "http", PerSelectorPolicies: l7map, Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
})
expected.Egress.PortRules.Upsert("3000", 0, "TCP", &L4Filter{
Port: 3000, Protocol: api.ProtoTCP, U8Proto: 6, Ingress: false,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
})
expected.Egress.PortRules.Upsert("3000", 0, "UDP", &L4Filter{
Port: 3000, Protocol: api.ProtoUDP, U8Proto: 17, Ingress: false,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
})
expected.Egress.PortRules.Upsert("3000", 0, "SCTP", &L4Filter{
Port: 3000, Protocol: api.ProtoSCTP, U8Proto: 132, Ingress: false,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
})
pol1, err := td.repo.resolvePolicyLocked(idA)
require.NoError(t, err)
defer pol1.Detach()
checkPolicy(&expected, &pol1.L4Policy)
// The allow-all entry on port 80 in this rule overlaps with its ingress
// "http" entry on the same port, so we expect them to merge into a
// wildcarded HTTP rule.
td.resetRepo()
rule2 := api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
// Note that this allows all on 80, so the result should wildcard HTTP
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
Egress: []api.EgressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "3000", Protocol: api.ProtoAny},
},
}},
},
},
}
td.repo.mustAdd(rule2)
expected = NewL4Policy(0)
expected.Ingress.PortRules.Upsert("80", 0, "TCP", &L4Filter{
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}, {}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
})
expected.Egress.PortRules.Upsert("3000", 0, "TCP", &L4Filter{
Port: 3000, Protocol: api.ProtoTCP, U8Proto: 6, Ingress: false,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
})
expected.Egress.PortRules.Upsert("3000", 0, "UDP", &L4Filter{
Port: 3000, Protocol: api.ProtoUDP, U8Proto: 17, Ingress: false,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
})
expected.Egress.PortRules.Upsert("3000", 0, "SCTP", &L4Filter{
Port: 3000, Protocol: api.ProtoSCTP, U8Proto: 132, Ingress: false,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
})
pol2, err := td.repo.resolvePolicyLocked(idA)
require.NoError(t, err)
defer pol2.Detach()
checkPolicy(&expected, &pol2.L4Policy)
}
func TestMergeL4PolicyIngress(t *testing.T) {
td := newTestData().withIDs(ruleTestIDs)
rule := api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{fooSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{bazSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}
td.repo.mustAdd(rule)
mergedES := L7DataMap{
td.cachedFooSelector: nil,
td.cachedBazSelector: nil,
}
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
L7Parser: ParserTypeNone, PerSelectorPolicies: mergedES, Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedFooSelector: {nil},
td.cachedBazSelector: {nil},
}),
}})
pol, err := td.repo.resolvePolicyLocked(idA)
require.NoError(t, err)
defer pol.Detach()
require.True(t, expected.TestingOnlyEquals(pol.L4Policy.Ingress.PortRules))
}
func TestMergeL4PolicyEgress(t *testing.T) {
td := newTestData().withIDs(ruleTestIDs)
// A can access B with TCP on port 80, and C with TCP on port 80.
rule1 := api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorB},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}
td.repo.mustAdd(rule1)
mergedES := L7DataMap{
td.cachedSelectorB: nil,
td.cachedSelectorC: nil,
}
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
L7Parser: ParserTypeNone, PerSelectorPolicies: mergedES, Ingress: false,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorB: {nil},
td.cachedSelectorC: {nil},
}),
}})
pol, err := td.repo.resolvePolicyLocked(idA)
require.NoError(t, err)
defer pol.Detach()
require.True(t, expected.TestingOnlyEquals(pol.L4Policy.Egress.PortRules), expected.TestingOnlyDiff(pol.L4Policy.Egress.PortRules))
}
func TestMergeL7PolicyIngress(t *testing.T) {
td := newTestData().withIDs(ruleTestIDs)
rule1 := api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
// Note that this allows all on 80, so the result should wildcard HTTP
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: api.EndpointSelectorSlice{endpointSelectorB},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
}
td.repo.mustAdd(rule1)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}, {}},
},
isRedirect: true,
},
td.cachedSelectorB: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorB: {nil},
td.wildcardCachedSelector: {nil},
}),
}})
sp, err := td.repo.resolvePolicyLocked(idA)
require.NoError(t, err)
require.True(t, sp.L4Policy.Ingress.PortRules.TestingOnlyEquals(expected), sp.L4Policy.Ingress.PortRules.TestingOnlyDiff(expected))
rule2 := api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: api.EndpointSelectorSlice{endpointSelectorB},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
},
}
l7rules := api.L7Rules{
Kafka: []kafka.PortRule{{Topic: "foo"}},
}
l7map := L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: l7rules,
isRedirect: true,
},
td.cachedSelectorB: &PerSelectorPolicy{
L7Rules: l7rules,
isRedirect: true,
},
}
expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: "kafka", PerSelectorPolicies: l7map, Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorB: {nil},
td.wildcardCachedSelector: {nil},
}),
}})
td.resetRepo()
td.repo.mustAdd(rule2)
sp, err = td.repo.resolvePolicyLocked(idA)
require.NoError(t, err)
require.True(t, sp.L4Policy.Ingress.PortRules.TestingOnlyEquals(expected), sp.L4Policy.Ingress.PortRules.TestingOnlyDiff(expected))
// Ensure that adding rule1 (80: http) and rule2 (80: kafka) results in an error
td.repo.mustAdd(rule1)
_, err = td.repo.resolvePolicyLocked(idA)
require.Error(t, err)
td.resetRepo()
// Similar to 'rule2', but with different topics for the l3-dependent
// rule and the l4-only rule.
rule3 := api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: api.EndpointSelectorSlice{endpointSelectorB},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "bar"},
},
},
}},
},
},
}
td.repo.mustAdd(rule3)
fooRules := api.L7Rules{
Kafka: []kafka.PortRule{{Topic: "foo"}},
}
barRules := api.L7Rules{
Kafka: []kafka.PortRule{{Topic: "bar"}},
}
// The L3-dependent L7 rules are not merged together.
l7map = L7DataMap{
td.cachedSelectorB: &PerSelectorPolicy{
L7Rules: fooRules,
isRedirect: true,
},
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: barRules,
isRedirect: true,
},
}
expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: "kafka", PerSelectorPolicies: l7map, Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorB: {nil},
td.wildcardCachedSelector: {nil},
}),
}})
sp, err = td.repo.resolvePolicyLocked(idA)
require.NoError(t, err)
require.True(t, sp.L4Policy.Ingress.PortRules.TestingOnlyEquals(expected), sp.L4Policy.Ingress.PortRules.TestingOnlyDiff(expected))
}
func TestMergeL7PolicyEgress(t *testing.T) {
td := newTestData().withIDs(ruleTestIDs)
rule1 := api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
// Note that this allows all on 80, so the result should wildcard HTTP
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/public"},
},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorB},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/private"},
},
},
}},
},
},
}
td.repo.mustAdd(rule1)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/public", Method: "GET"}, {}},
},
isRedirect: true,
},
td.cachedSelectorB: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/private", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: false,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.wildcardCachedSelector: {nil},
td.cachedSelectorB: {nil},
}),
}})
sp, err := td.repo.resolvePolicyLocked(idA)
require.NoError(t, err)
require.True(t, sp.L4Policy.Egress.PortRules.TestingOnlyEquals(expected), sp.L4Policy.Egress.PortRules.TestingOnlyDiff(expected))
td.resetRepo()
rule2 := api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
// Note that this allows all on 9092, so the result should wildcard Kafka
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "9092", Protocol: api.ProtoTCP},
},
}},
},
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "9092", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorB},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "9092", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
},
}
td.repo.mustAdd(rule2)
expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"9092/TCP": {
Port: 9092, Protocol: api.ProtoTCP, U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: ParserTypeKafka,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
Kafka: []kafka.PortRule{{Topic: "foo"}, {}},
},
isRedirect: true,
},
td.cachedSelectorB: &PerSelectorPolicy{
L7Rules: api.L7Rules{
Kafka: []kafka.PortRule{{Topic: "foo"}},
},
isRedirect: true,
},
},
Ingress: false,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorB: {nil},
td.wildcardCachedSelector: {nil},
}),
}})
sp, err = td.repo.resolvePolicyLocked(idA)
require.NoError(t, err)
require.True(t, sp.L4Policy.Egress.PortRules.TestingOnlyEquals(expected), sp.L4Policy.Egress.PortRules.TestingOnlyDiff(expected))
td.repo.mustAdd(rule1)
_, err = td.repo.resolvePolicyLocked(idA)
require.NoError(t, err)
// Similar to 'rule2', but with different topics for the l3-dependent
// rule and the l4-only rule.
rule3 := api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorB},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "foo"},
},
},
}},
},
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
Kafka: []kafka.PortRule{
{Topic: "bar"},
},
},
}},
},
},
}
td.resetRepo()
td.repo.mustAdd(rule3)
fooRules := api.L7Rules{
Kafka: []kafka.PortRule{{Topic: "foo"}},
}
barRules := api.L7Rules{
Kafka: []kafka.PortRule{{Topic: "bar"}},
}
// The l3-dependent l7 rules are not merged together.
l7map := L7DataMap{
td.cachedSelectorB: &PerSelectorPolicy{
L7Rules: fooRules,
isRedirect: true,
},
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: barRules,
isRedirect: true,
},
}
expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"80/TCP": {
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: "kafka", PerSelectorPolicies: l7map, Ingress: false,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorB: {nil},
td.wildcardCachedSelector: {nil},
}),
}})
sp, err = td.repo.resolvePolicyLocked(idA)
require.NoError(t, err)
require.True(t, sp.L4Policy.Egress.PortRules.TestingOnlyEquals(expected), sp.L4Policy.Egress.PortRules.TestingOnlyDiff(expected))
}
func TestRuleWithNoEndpointSelector(t *testing.T) {
apiRule1 := api.Rule{
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromCIDR: []api.CIDR{
"10.0.1.0/24",
"192.168.2.0",
"10.0.3.1",
"2001:db8::1/48",
"2001:db9::",
},
},
},
},
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToCIDR: []api.CIDR{
"10.1.0.0/16",
"2001:dbf::/64",
},
},
}, {
EgressCommonRule: api.EgressCommonRule{
ToCIDRSet: []api.CIDRRule{{Cidr: api.CIDR("10.0.0.0/8"), ExceptCIDRs: []api.CIDR{"10.96.0.0/12"}}},
},
},
},
}
err := apiRule1.Sanitize()
require.Error(t, err)
}
func TestL3Policy(t *testing.T) {
apiRule1 := api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromCIDR: []api.CIDR{
"10.0.1.0/24",
"192.168.2.0",
"10.0.3.1",
"2001:db8::1/48",
"2001:db9::",
},
},
},
},
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToCIDR: []api.CIDR{
"10.1.0.0/16",
"2001:dbf::/64",
},
},
}, {
EgressCommonRule: api.EgressCommonRule{
ToCIDRSet: []api.CIDRRule{{Cidr: api.CIDR("10.0.0.0/8"), ExceptCIDRs: []api.CIDR{"10.96.0.0/12"}}},
},
},
},
}
err := apiRule1.Sanitize()
require.NoError(t, err)
rule1 := &rule{Rule: apiRule1}
err = rule1.Sanitize()
require.NoError(t, err)
// Must be parsable; make sure Sanitize fails when it is not.
err = (&api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromCIDR: []api.CIDR{"10.0.1..0/24"},
},
}},
}).Sanitize()
require.Error(t, err)
// Test CIDRRule with no provided CIDR or ExceptionCIDR.
// Should fail as CIDR is required.
err = (&api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromCIDRSet: []api.CIDRRule{{Cidr: "", ExceptCIDRs: nil}},
},
}},
}).Sanitize()
require.Error(t, err)
// Test CIDRRule with only CIDR provided; should not fail, as ExceptionCIDR
// is optional.
err = (&api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromCIDRSet: []api.CIDRRule{{Cidr: "10.0.1.0/24", ExceptCIDRs: nil}},
},
}},
}).Sanitize()
require.NoError(t, err)
// Cannot provide just an IP to a CIDRRule; Cidr must be of format
// <IP>/<prefix>.
err = (&api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromCIDRSet: []api.CIDRRule{{Cidr: "10.0.1.32", ExceptCIDRs: nil}},
},
}},
}).Sanitize()
require.Error(t, err)
// Cannot exclude a range that is not part of the CIDR.
err = (&api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromCIDRSet: []api.CIDRRule{{Cidr: "10.0.0.0/10", ExceptCIDRs: []api.CIDR{"10.64.0.0/11"}}},
},
}},
}).Sanitize()
require.Error(t, err)
// Must have a contiguous mask; make sure Sanitize fails when it does not.
err = (&api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromCIDR: []api.CIDR{"10.0.1.0/128.0.0.128"},
},
}},
}).Sanitize()
require.Error(t, err)
// Prefix length must be in range for the address, make sure
// Sanitize fails if the given prefix length is out of range.
err = (&api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{{
IngressCommonRule: api.IngressCommonRule{
FromCIDR: []api.CIDR{"10.0.1.0/34"},
},
}},
}).Sanitize()
require.Error(t, err)
}
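// TestICMPPolicy verifies that ICMP and ICMPv6 field rules resolve into the
// expected L4 filters, both on their own and combined with port rules.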
func TestICMPPolicy(t *testing.T) {
td := newTestData().withIDs(ruleTestIDs)
// A rule for ICMP
i8 := intstr.FromInt(8)
i9 := intstr.FromInt(9)
rule1 := api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
ICMPs: api.ICMPRules{{
Fields: []api.ICMPField{{
Type: &i8,
}},
}},
},
},
Egress: []api.EgressRule{
{
ICMPs: api.ICMPRules{{
Fields: []api.ICMPField{{
Type: &i9,
}},
}},
},
},
}
td.repo.mustAdd(rule1)
expectedIn := NewL4PolicyMapWithValues(map[string]*L4Filter{"ICMP/8": {
Port: 8,
Protocol: api.ProtoICMP,
U8Proto: u8proto.ProtoIDs["icmp"],
Ingress: true,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
}})
expectedOut := NewL4PolicyMapWithValues(map[string]*L4Filter{"ICMP/9": {
Port: 9,
Protocol: api.ProtoICMP,
U8Proto: u8proto.ProtoIDs["icmp"],
Ingress: false,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
}})
pol, err := td.repo.resolvePolicyLocked(idA)
require.NoError(t, err)
defer pol.Detach()
require.True(t, expectedIn.TestingOnlyEquals(pol.L4Policy.Ingress.PortRules), expectedIn.TestingOnlyDiff(pol.L4Policy.Ingress.PortRules))
require.True(t, expectedOut.TestingOnlyEquals(pol.L4Policy.Egress.PortRules), expectedOut.TestingOnlyDiff(pol.L4Policy.Egress.PortRules))
// A rule for Ports and ICMP
rule2 := api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
}, {
ICMPs: api.ICMPRules{{
Fields: []api.ICMPField{{
Type: &i8,
}},
}},
},
},
}
td.resetRepo()
td.repo.mustAdd(rule2)
expected := NewL4PolicyMapWithValues(map[string]*L4Filter{
"ICMP/8": {
Port: 8,
Protocol: api.ProtoICMP,
U8Proto: u8proto.ProtoIDs["icmp"],
Ingress: true,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
},
"TCP/80": {
Port: 80,
Protocol: api.ProtoTCP,
U8Proto: u8proto.ProtoIDs["tcp"],
Ingress: true,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
},
})
pol, err = td.repo.resolvePolicyLocked(idA)
require.NoError(t, err)
defer pol.Detach()
require.True(t, expected.TestingOnlyEquals(pol.L4Policy.Ingress.PortRules), expected.TestingOnlyDiff(pol.L4Policy.Ingress.PortRules))
// A rule for ICMPv6
icmpV6Type := intstr.FromInt(128)
rule3 := api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
ICMPs: api.ICMPRules{{
Fields: []api.ICMPField{{
Family: "IPv6",
Type: &icmpV6Type,
}},
}},
},
},
}
td.resetRepo()
td.repo.mustAdd(rule3)
expected = NewL4PolicyMapWithValues(map[string]*L4Filter{"ICMPV6/128": {
Port: 128,
Protocol: api.ProtoICMPv6,
U8Proto: u8proto.ProtoIDs["icmp"],
Ingress: true,
wildcard: td.wildcardCachedSelector,
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
},
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.wildcardCachedSelector: {nil}}),
}})
pol, err = td.repo.resolvePolicyLocked(idA)
require.NoError(t, err)
defer pol.Detach()
require.True(t, expected.TestingOnlyEquals(pol.L4Policy.Ingress.PortRules), expected.TestingOnlyDiff(pol.L4Policy.Ingress.PortRules))
}
// Tests the restrictions of combining certain label-based L3 and L4 policies.
// This ensures that the user is informed of policy combinations that are not
// implemented in the datapath.
func TestEgressRuleRestrictions(t *testing.T) {
fooSelector := []api.EndpointSelector{
api.NewESFromLabels(labels.ParseSelectLabel("foo")),
}
// Cannot combine ToEndpoints and ToCIDR
apiRule1 := api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToCIDR: []api.CIDR{
"10.1.0.0/16",
"2001:dbf::/64",
},
ToEndpoints: fooSelector,
},
},
},
}
err := apiRule1.Sanitize()
require.Error(t, err)
}
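// TestPolicyEntityValidationEgress checks that egress ToEntities accepts known
// entities and rejects unknown ones during sanitization.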
func TestPolicyEntityValidationEgress(t *testing.T) {
r := api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEntities: []api.Entity{api.EntityWorld},
},
},
},
}
require.NoError(t, r.Sanitize())
require.Len(t, r.Egress[0].ToEntities, 1)
r.Egress[0].ToEntities = []api.Entity{api.EntityHost}
require.NoError(t, r.Sanitize())
require.Len(t, r.Egress[0].ToEntities, 1)
r.Egress[0].ToEntities = []api.Entity{"trololo"}
require.Error(t, r.Sanitize())
}
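// TestPolicyEntityValidationIngress checks that ingress FromEntities accepts
// known entities and rejects unknown ones during sanitization.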
func TestPolicyEntityValidationIngress(t *testing.T) {
r := api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEntities: []api.Entity{api.EntityWorld},
},
},
},
}
require.NoError(t, r.Sanitize())
require.Len(t, r.Ingress[0].FromEntities, 1)
r.Ingress[0].FromEntities = []api.Entity{api.EntityHost}
require.NoError(t, r.Sanitize())
require.Len(t, r.Ingress[0].FromEntities, 1)
r.Ingress[0].FromEntities = []api.Entity{"trololo"}
require.Error(t, r.Sanitize())
}
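// TestPolicyEntityValidationEntitySelectorsFill checks that multiple entities
// are preserved on both ingress and egress after sanitization.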
func TestPolicyEntityValidationEntitySelectorsFill(t *testing.T) {
r := api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEntities: []api.Entity{api.EntityWorld, api.EntityHost},
},
},
},
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEntities: []api.Entity{api.EntityWorld, api.EntityHost},
},
},
},
}
require.NoError(t, r.Sanitize())
require.Len(t, r.Ingress[0].FromEntities, 2)
require.Len(t, r.Egress[0].ToEntities, 2)
}
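// TestL3RuleLabels verifies that rule labels are propagated to the CIDR-derived
// filters of the resolved policy.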
func TestL3RuleLabels(t *testing.T) {
ruleLabels := map[string]labels.LabelArray{
"rule0": labels.ParseLabelArray("name=apiRule0"),
"rule1": labels.ParseLabelArray("name=apiRule1"),
"rule2": labels.ParseLabelArray("name=apiRule2"),
}
rules := map[string]api.Rule{
"rule0": {
EndpointSelector: endpointSelectorA,
Labels: ruleLabels["rule0"],
Ingress: []api.IngressRule{{}},
Egress: []api.EgressRule{{}},
},
"rule1": {
EndpointSelector: endpointSelectorA,
Labels: ruleLabels["rule1"],
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromCIDR: []api.CIDR{"10.0.1.0/32"},
},
},
},
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToCIDR: []api.CIDR{"10.1.0.0/32"},
},
},
},
},
"rule2": {
EndpointSelector: endpointSelectorA,
Labels: ruleLabels["rule2"],
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromCIDR: []api.CIDR{"10.0.2.0/32"},
},
},
},
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToCIDR: []api.CIDR{"10.2.0.0/32"},
},
},
},
},
}
testCases := []struct {
description string // the description to print in asserts
rulesToApply []string // the rules from the rules map to resolve, in order
expectedIngressLabels map[string]labels.LabelArrayList // the slice of LabelArray we should see, per CIDR prefix
expectedEgressLabels map[string]labels.LabelArrayList // the slice of LabelArray we should see, per CIDR prefix
}{
{
description: "Empty rule that matches. Should not apply labels",
rulesToApply: []string{"rule0"},
expectedIngressLabels: nil,
expectedEgressLabels: nil,
}, {
description: "A rule that matches. Should apply labels",
rulesToApply: []string{"rule1"},
expectedIngressLabels: map[string]labels.LabelArrayList{"10.0.1.0/32": {ruleLabels["rule1"]}},
expectedEgressLabels: map[string]labels.LabelArrayList{"10.1.0.0/32": {ruleLabels["rule1"]}},
}, {
description: "Multiple matching rules. Should apply labels from all that have rule entries",
rulesToApply: []string{"rule0", "rule1", "rule2"},
expectedIngressLabels: map[string]labels.LabelArrayList{
"10.0.1.0/32": {ruleLabels["rule1"]},
"10.0.2.0/32": {ruleLabels["rule2"]}},
expectedEgressLabels: map[string]labels.LabelArrayList{
"10.1.0.0/32": {ruleLabels["rule1"]},
"10.2.0.0/32": {ruleLabels["rule2"]}},
}}
// endpoint selector for all tests
for i, test := range testCases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
td := newTestData().withIDs(ruleTestIDs)
for _, r := range test.rulesToApply {
td.repo.mustAdd(rules[r])
}
finalPolicy, err := td.repo.resolvePolicyLocked(idA)
require.NoError(t, err)
type expectedResult map[string]labels.LabelArrayList
mapDirectionalResultsToExpectedOutput := map[*L4Filter]expectedResult{
finalPolicy.L4Policy.Ingress.PortRules.ExactLookup("0", 0, "ANY"): test.expectedIngressLabels,
finalPolicy.L4Policy.Egress.PortRules.ExactLookup("0", 0, "ANY"): test.expectedEgressLabels,
}
for filter, exp := range mapDirectionalResultsToExpectedOutput {
if len(exp) > 0 {
for cidr, rule := range exp {
matches := false
for _, origin := range filter.RuleOrigin {
lbls := origin.GetLabelArrayList()
if lbls.Equals(rule) {
matches = true
break
}
}
require.True(t, matches, "%s: expected filter %+v to be derived from rule %s", test.description, filter, rule)
matches = false
for sel := range filter.PerSelectorPolicies {
cidrLabels := labels.ParseLabelArray("cidr:" + cidr)
t.Logf("Testing %+v", cidrLabels)
if matches = sel.(*identitySelector).source.(*labelIdentitySelector).xxxMatches(cidrLabels); matches {
break
}
}
require.True(t, matches, "%s: expected cidr %s to match filter %+v", test.description, cidr, filter)
}
}
}
})
}
}
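// TestL4RuleLabels verifies that rule labels are propagated to the per-port
// filters of the resolved policy.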
func TestL4RuleLabels(t *testing.T) {
ruleLabels := map[string]labels.LabelArray{
"rule0": labels.ParseLabelArray("name=apiRule0"),
"rule1": labels.ParseLabelArray("name=apiRule1"),
"rule2": labels.ParseLabelArray("name=apiRule2"),
}
rules := map[string]api.Rule{
"rule0": {
EndpointSelector: endpointSelectorA,
Labels: ruleLabels["rule0"],
Ingress: []api.IngressRule{{}},
Egress: []api.EgressRule{{}},
},
"rule1": {
EndpointSelector: endpointSelectorA,
Labels: ruleLabels["rule1"],
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{{Port: "1010", Protocol: api.ProtoTCP}},
}},
},
},
Egress: []api.EgressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{{Port: "1100", Protocol: api.ProtoTCP}},
}},
},
},
},
"rule2": {
EndpointSelector: endpointSelectorA,
Labels: ruleLabels["rule2"],
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{{Port: "1020", Protocol: api.ProtoTCP}},
}},
},
},
Egress: []api.EgressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{{Port: "1200", Protocol: api.ProtoTCP}},
}},
},
},
},
}
testCases := []struct {
description string // the description to print in asserts
rulesToApply []string // the rules from the rules map to resolve, in order
expectedIngressLabels map[string]labels.LabelArrayList // the slice of LabelArray we should see, in order
expectedEgressLabels map[string]labels.LabelArrayList // the slice of LabelArray we should see, in order
}{
{
description: "Empty rule that matches. Should not apply labels",
rulesToApply: []string{"rule0"},
expectedIngressLabels: map[string]labels.LabelArrayList{},
expectedEgressLabels: map[string]labels.LabelArrayList{},
},
{
description: "A rule that matches. Should apply labels",
rulesToApply: []string{"rule1"},
expectedIngressLabels: map[string]labels.LabelArrayList{"1010/TCP": {ruleLabels["rule1"]}},
expectedEgressLabels: map[string]labels.LabelArrayList{"1100/TCP": {ruleLabels["rule1"]}},
}, {
description: "Multiple matching rules. Should apply labels from all that have rule entries",
rulesToApply: []string{"rule0", "rule1", "rule2"},
expectedIngressLabels: map[string]labels.LabelArrayList{
"1010/TCP": {ruleLabels["rule1"]},
"1020/TCP": {ruleLabels["rule2"]}},
expectedEgressLabels: map[string]labels.LabelArrayList{
"1100/TCP": {ruleLabels["rule1"]},
"1200/TCP": {ruleLabels["rule2"]}},
}}
// endpoint selector for all tests
for i, test := range testCases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
td := newTestData().withIDs(ruleTestIDs)
for _, r := range test.rulesToApply {
td.repo.mustAdd(rules[r])
}
finalPolicy, err := td.repo.resolvePolicyLocked(idA)
require.NoError(t, err)
require.Equal(t, len(test.expectedIngressLabels), finalPolicy.L4Policy.Ingress.PortRules.Len(), test.description)
for portProto := range test.expectedIngressLabels {
portProtoSlice := strings.Split(portProto, "/")
out := finalPolicy.L4Policy.Ingress.PortRules.ExactLookup(portProtoSlice[0], 0, portProtoSlice[1])
require.NotNil(t, out, test.description)
require.Len(t, out.RuleOrigin, 1, test.description)
lbls := out.RuleOrigin[out.wildcard].GetLabelArrayList()
require.EqualValues(t, test.expectedIngressLabels[portProto], lbls, test.description)
}
require.Equal(t, len(test.expectedEgressLabels), finalPolicy.L4Policy.Egress.PortRules.Len(), test.description)
for portProto := range test.expectedEgressLabels {
portProtoSlice := strings.Split(portProto, "/")
out := finalPolicy.L4Policy.Egress.PortRules.ExactLookup(portProtoSlice[0], 0, portProtoSlice[1])
require.NotNil(t, out, test.description)
require.Len(t, out.RuleOrigin, 1, test.description)
lbls := out.RuleOrigin[out.wildcard].GetLabelArrayList()
require.EqualValues(t, test.expectedEgressLabels[portProto], lbls, test.description)
}
})
}
}
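// Shared fixtures for the flow-verdict tests below: identities, selectors,
// flows, a default-deny ingress rule, and named ports.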
var (
labelsA = labels.LabelArray{
labels.NewLabel("id", "a", labels.LabelSourceK8s),
}
idA = identity.NewIdentity(1001, labelsA.Labels())
endpointSelectorA = api.NewESFromLabels(labels.ParseSelectLabel("id=a"))
labelsB = labels.LabelArray{
labels.NewLabel("id1", "b", labels.LabelSourceK8s),
labels.NewLabel("id2", "t", labels.LabelSourceK8s),
}
idB = identity.NewIdentity(1002, labelsB.Labels())
endpointSelectorB = api.NewESFromLabels(labels.ParseSelectLabel("id1=b"))
labelsC = labels.LabelArray{
labels.NewLabel("id", "t", labels.LabelSourceK8s),
}
idC = identity.NewIdentity(1003, labelsC.Labels())
endpointSelectorC = api.NewESFromLabels(labels.ParseSelectLabel("id=t"))
flowAToB = Flow{From: idA, To: idB, Proto: u8proto.TCP, Dport: 80}
flowAToC = Flow{From: idA, To: idC, Proto: u8proto.TCP, Dport: 80}
flowAToC90 = Flow{From: idA, To: idC, Proto: u8proto.TCP, Dport: 90}
flowAToWorld80 = Flow{From: idA, To: identity.LookupReservedIdentity(identity.ReservedIdentityWorld), Proto: u8proto.TCP, Dport: 80}
flowAToWorld90 = Flow{From: idA, To: identity.LookupReservedIdentity(identity.ReservedIdentityWorld), Proto: u8proto.TCP, Dport: 90}
ruleTestIDs = identity.IdentityMap{
idA.ID: idA.LabelArray,
idB.ID: idB.LabelArray,
idC.ID: idC.LabelArray,
}
defaultDenyIngress = &api.Rule{
EndpointSelector: api.WildcardEndpointSelector,
Ingress: []api.IngressRule{{}},
}
namedPorts = map[string]uint16{
"port-80": 80,
"port-90": 90,
}
)
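// checkFlow resolves the given flow against the repository and asserts that
// the returned verdict matches the expected decision.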
func checkFlow(t *testing.T, repo *Repository, flow Flow, verdict api.Decision) {
t.Helper()
srcEP := &EndpointInfo{
ID: 1,
TCPNamedPorts: namedPorts,
}
dstEP := &EndpointInfo{
ID: 2,
TCPNamedPorts: namedPorts,
}
actual, err := LookupFlow(repo, flow, srcEP, dstEP)
require.NoError(t, err)
require.Equal(t, verdict, actual)
}
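// TestIngressAllowAll verifies that a wildcard FromEndpoints ingress rule for
// endpoint C allows traffic to C on any port, while endpoints without an allow
// rule remain denied by the default-deny rule.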
func TestIngressAllowAll(t *testing.T) {
td := newTestData().withIDs(ruleTestIDs)
repo := td.repo
repo.MustAddList(api.Rules{
defaultDenyIngress,
&api.Rule{
EndpointSelector: endpointSelectorC,
Ingress: []api.IngressRule{
{
// Allow all L3&L4 ingress rule
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{
api.WildcardEndpointSelector,
},
},
},
},
},
})
checkFlow(t, repo, flowAToB, api.Denied)
checkFlow(t, repo, flowAToC, api.Allowed)
checkFlow(t, repo, flowAToC90, api.Allowed)
}
func TestIngressAllowAllL4Overlap(t *testing.T) {
td := newTestData().withIDs(ruleTestIDs)
repo := td.repo
repo.MustAddList(api.Rules{
defaultDenyIngress,
&api.Rule{
EndpointSelector: endpointSelectorC,
Ingress: []api.IngressRule{
{
// Allow all L3&L4 ingress rule
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{
api.WildcardEndpointSelector,
},
},
},
{
// This rule is a subset of the above
// rule and should *NOT* restrict to
// port 80 only
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
},
})
checkFlow(t, repo, flowAToC, api.Allowed)
checkFlow(t, repo, flowAToC90, api.Allowed)
}
func TestIngressAllowAllNamedPort(t *testing.T) {
repo := newTestData().withIDs(ruleTestIDs).repo
repo.MustAddList(api.Rules{
defaultDenyIngress,
&api.Rule{
EndpointSelector: endpointSelectorC,
Ingress: []api.IngressRule{
{
// Allow all L3&L4 ingress rule
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{
api.WildcardEndpointSelector,
},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "port-80", Protocol: api.ProtoTCP},
},
}},
},
},
},
})
checkFlow(t, repo, flowAToC, api.Allowed)
checkFlow(t, repo, flowAToB, api.Denied)
checkFlow(t, repo, flowAToC90, api.Denied)
}
func TestIngressAllowAllL4OverlapNamedPort(t *testing.T) {
td := newTestData().withIDs(ruleTestIDs)
repo := td.repo
repo.MustAddList(api.Rules{
defaultDenyIngress,
&api.Rule{
EndpointSelector: endpointSelectorC,
Ingress: []api.IngressRule{
{
// Allow all L3&L4 ingress rule
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{
api.WildcardEndpointSelector,
},
},
},
{
// This rule is a subset of the above
// rule and should *NOT* restrict to
// port 80 only
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "port-80", Protocol: api.ProtoTCP},
},
}},
},
},
},
})
checkFlow(t, repo, flowAToC, api.Allowed)
checkFlow(t, repo, flowAToC90, api.Allowed)
}
func TestIngressL4AllowAll(t *testing.T) {
td := newTestData().withIDs(ruleTestIDs)
repo := td.repo
repo.MustAddList(api.Rules{
defaultDenyIngress,
&api.Rule{
EndpointSelector: endpointSelectorC,
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
},
})
checkFlow(t, repo, flowAToC, api.Allowed)
checkFlow(t, repo, flowAToC90, api.Denied)
pol, err := repo.resolvePolicyLocked(idC)
require.NoError(t, err)
defer pol.Detach()
filter := pol.L4Policy.Ingress.PortRules.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.True(t, filter.Ingress)
require.Len(t, filter.PerSelectorPolicies, 1)
require.Nil(t, filter.PerSelectorPolicies[td.wildcardCachedSelector])
}
func TestIngressL4AllowAllNamedPort(t *testing.T) {
td := newTestData().withIDs(ruleTestIDs)
repo := td.repo
repo.MustAddList(api.Rules{
&api.Rule{
EndpointSelector: endpointSelectorC,
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "port-80", Protocol: api.ProtoTCP},
},
}},
},
},
},
})
checkFlow(t, repo, flowAToC, api.Allowed)
checkFlow(t, repo, flowAToC90, api.Denied)
pol, err := repo.resolvePolicyLocked(idC)
require.NoError(t, err)
defer pol.Detach()
filter := pol.L4Policy.Ingress.PortRules.ExactLookup("port-80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(0), filter.Port)
require.Equal(t, "port-80", filter.PortName)
require.True(t, filter.Ingress)
require.Len(t, filter.PerSelectorPolicies, 1)
require.Nil(t, filter.PerSelectorPolicies[td.wildcardCachedSelector])
}
func TestEgressAllowAll(t *testing.T) {
td := newTestData().withIDs(ruleTestIDs)
repo := td.repo
repo.MustAddList(api.Rules{
&api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{
api.WildcardEndpointSelector,
},
},
},
},
},
})
checkFlow(t, repo, flowAToB, api.Allowed)
checkFlow(t, repo, flowAToC, api.Allowed)
checkFlow(t, repo, flowAToC90, api.Allowed)
}
func TestEgressL4AllowAll(t *testing.T) {
td := newTestData().withIDs(ruleTestIDs)
repo := td.repo
repo.MustAddList(api.Rules{
&api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
},
})
checkFlow(t, repo, flowAToB, api.Allowed)
checkFlow(t, repo, flowAToC, api.Allowed)
checkFlow(t, repo, flowAToC90, api.Denied)
pol, err := repo.resolvePolicyLocked(idA)
require.NoError(t, err)
defer pol.Detach()
t.Log(pol.L4Policy.Egress.PortRules)
filter := pol.L4Policy.Egress.PortRules.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.False(t, filter.Ingress)
require.Len(t, filter.PerSelectorPolicies, 1)
require.Nil(t, filter.PerSelectorPolicies[td.wildcardCachedSelector])
}
func TestEgressL4AllowWorld(t *testing.T) {
td := newTestData().withIDs(ruleTestIDs, identity.ListReservedIdentities())
repo := td.repo
repo.MustAddList(api.Rules{
&api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEntities: []api.Entity{api.EntityWorld},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
},
})
checkFlow(t, repo, flowAToWorld80, api.Allowed)
checkFlow(t, repo, flowAToWorld90, api.Denied)
// Pod to pod must be denied on port 80 and 90, only world was whitelisted
checkFlow(t, repo, flowAToC, api.Denied)
checkFlow(t, repo, flowAToC90, api.Denied)
pol, err := repo.resolvePolicyLocked(idA)
require.NoError(t, err)
defer pol.Detach()
filter := pol.L4Policy.Egress.PortRules.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.False(t, filter.Ingress)
require.Len(t, filter.PerSelectorPolicies, 3)
}
func TestEgressL4AllowAllEntity(t *testing.T) {
td := newTestData().withIDs(ruleTestIDs, identity.ListReservedIdentities())
repo := td.repo
repo.MustAddList(api.Rules{
&api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEntities: []api.Entity{api.EntityAll},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
},
})
checkFlow(t, repo, flowAToWorld80, api.Allowed)
checkFlow(t, repo, flowAToWorld90, api.Denied)
// Pod to pod must be allowed on port 80, denied on port 90 (all identity)
checkFlow(t, repo, flowAToC, api.Allowed)
checkFlow(t, repo, flowAToC90, api.Denied)
pol, err := repo.resolvePolicyLocked(idA)
require.NoError(t, err)
defer pol.Detach()
filter := pol.L4Policy.Egress.PortRules.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.False(t, filter.Ingress)
require.Len(t, filter.PerSelectorPolicies, 1)
}
func TestEgressL3AllowWorld(t *testing.T) {
td := newTestData().withIDs(ruleTestIDs, identity.ListReservedIdentities())
repo := td.repo
repo.MustAddList(api.Rules{
&api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEntities: []api.Entity{api.EntityWorld},
},
},
},
},
})
checkFlow(t, repo, flowAToWorld80, api.Allowed)
checkFlow(t, repo, flowAToWorld90, api.Allowed)
// Pod to pod must be denied on port 80 and 90, only world was whitelisted
checkFlow(t, repo, flowAToC, api.Denied)
checkFlow(t, repo, flowAToC90, api.Denied)
}
func TestEgressL3AllowAllEntity(t *testing.T) {
td := newTestData().withIDs(ruleTestIDs, identity.ListReservedIdentities())
repo := td.repo
repo.MustAddList(api.Rules{
&api.Rule{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEntities: []api.Entity{api.EntityAll},
},
},
},
},
})
checkFlow(t, repo, flowAToWorld80, api.Allowed)
checkFlow(t, repo, flowAToWorld90, api.Allowed)
// Pod to pod must be allowed on both port 80 and 90 (L3 only rule)
checkFlow(t, repo, flowAToC, api.Allowed)
checkFlow(t, repo, flowAToC90, api.Allowed)
}
func TestL4WildcardMerge(t *testing.T) {
// First, test the implicit case.
//
// Test the case where rules select the same endpoint on the same port-protocol
// tuple, one applying at L4 only and the other at both L4 and L7; the L4-only
// rule shadows the L4-L7 rule. This is because the L4-only rule implicitly
// allows all traffic at L7, so the L7-related parts of the L4-L7 rule are
// redundant.
td := newTestData().withIDs(ruleTestIDs)
repo := td.repo
repo.MustAddList(api.Rules{&api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "7000", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
L7Proto: "testparser",
L7: []api.PortRuleL7{
{"Key": "Value"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "7000", Protocol: api.ProtoTCP},
},
}},
},
},
}})
expected := &L4Filter{
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: "http",
PerSelectorPolicies: L7DataMap{
td.wildcardCachedSelector: nil,
td.cachedSelectorC: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorC: {nil},
td.wildcardCachedSelector: {nil},
}),
}
pol1, err := repo.resolvePolicyLocked(idA)
require.NoError(t, err)
defer pol1.Detach()
l4IngressPolicy := pol1.L4Policy.Ingress.PortRules
filter := l4IngressPolicy.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.True(t, filter.Ingress)
require.Len(t, filter.PerSelectorPolicies, 2)
require.NotNil(t, filter.PerSelectorPolicies[td.cachedSelectorC])
require.Nil(t, filter.PerSelectorPolicies[td.wildcardCachedSelector])
require.True(t, expected.Equals(filter), "L4filters are not equal. Expected: %s, Actual: %s", expected.String(), filter.String())
require.Equal(t, ParserTypeHTTP, filter.L7Parser)
expectedL7 := &L4Filter{
Port: 7000, Protocol: api.ProtoTCP, U8Proto: 6,
L7Parser: "testparser",
PerSelectorPolicies: L7DataMap{
td.cachedSelectorC: &PerSelectorPolicy{
L7Rules: api.L7Rules{
L7Proto: "testparser",
L7: []api.PortRuleL7{{"Key": "Value"}, {}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{td.cachedSelectorC: {nil}}),
}
filterL7 := l4IngressPolicy.ExactLookup("7000", 0, "TCP")
require.NotNil(t, filterL7)
require.Equal(t, uint16(7000), filterL7.Port)
require.True(t, filterL7.Ingress)
require.Len(t, filterL7.PerSelectorPolicies, 1)
require.NotNil(t, filterL7.PerSelectorPolicies[td.cachedSelectorC])
require.Nil(t, filterL7.PerSelectorPolicies[td.wildcardCachedSelector])
require.True(t, expectedL7.Equals(filterL7), "L7filters are not equal. Expected: %s, Actual: %s", expectedL7.String(), filterL7.String())
require.Equal(t, L7ParserType("testparser"), filterL7.L7Parser)
// Test the reverse order as well; ensure that we check both conditions:
// whether an L4-only policy is already present in the L4Filter for the same
// port-protocol tuple, and whether L7 metadata exists in the L4Filter we are
// adding. Expect to resolve to an L4-only policy without any L7 metadata.
repo = td.resetRepo()
repo.MustAddList(api.Rules{&api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "7000", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "7000", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
L7Proto: "testparser",
L7: []api.PortRuleL7{
{"Key": "Value"},
},
},
}},
},
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
}})
pol2, err := repo.resolvePolicyLocked(idA)
require.NoError(t, err)
defer pol2.Detach()
l4IngressPolicy = pol2.L4Policy.Ingress.PortRules
filter = l4IngressPolicy.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.True(t, filter.Ingress)
require.Len(t, filter.PerSelectorPolicies, 2)
require.Nil(t, filter.PerSelectorPolicies[td.wildcardCachedSelector])
require.NotNil(t, filter.PerSelectorPolicies[td.cachedSelectorC])
require.True(t, expected.Equals(filter), "L4filters are not equal. Expected: %s, Actual: %s", expected.String(), filter.String())
require.Equal(t, ParserTypeHTTP, filter.L7Parser)
filterL7 = l4IngressPolicy.ExactLookup("7000", 0, "TCP")
require.NotNil(t, filterL7)
require.Equal(t, uint16(7000), filterL7.Port)
require.True(t, filterL7.Ingress)
require.Len(t, filterL7.PerSelectorPolicies, 1)
require.NotNil(t, filterL7.PerSelectorPolicies[td.cachedSelectorC])
require.Nil(t, filterL7.PerSelectorPolicies[td.wildcardCachedSelector])
require.True(t, expectedL7.Equals(filterL7), "L7filters are not equal. Expected: %s, Actual: %s", expectedL7.String(), filterL7.String())
require.Equal(t, L7ParserType("testparser"), filterL7.L7Parser)
// Second, test the explicit allow at L3.
repo = td.resetRepo()
repo.MustAddList(api.Rules{&api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}})
pol3, err := repo.resolvePolicyLocked(idA)
require.NoError(t, err)
defer pol3.Detach()
l4IngressPolicy = pol3.L4Policy.Ingress.PortRules
filter = l4IngressPolicy.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.True(t, filter.Ingress)
require.Equal(t, ParserTypeHTTP, filter.L7Parser)
require.Len(t, filter.PerSelectorPolicies, 2)
require.True(t, expected.Equals(filter), "L4filters are not equal. Expected: %s, Actual: %s", expected.String(), filter.String())
// Test the reverse order as well; ensure that we check both conditions:
// whether an L4-only policy is already present in the L4Filter for the same
// port-protocol tuple, and whether L7 metadata exists in the L4Filter we are
// adding. Expect to resolve to an L4-only policy without any L7 metadata.
repo = td.resetRepo()
repo.MustAddList(api.Rules{&api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{api.WildcardEndpointSelector},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
}})
pol4, err := repo.resolvePolicyLocked(idA)
require.NoError(t, err)
defer pol4.Detach()
l4IngressPolicy = pol4.L4Policy.Ingress.PortRules
filter = l4IngressPolicy.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.True(t, filter.Ingress)
require.Equal(t, ParserTypeHTTP, filter.L7Parser)
require.Len(t, filter.PerSelectorPolicies, 2)
require.True(t, expected.Equals(filter), "L4filters are not equal. Expected: %s, Actual: %s", expected.String(), filter.String())
}
func TestL3L4L7Merge(t *testing.T) {
// First rule allows ingress from all endpoints to port 80 only on
// GET to "/". However, second rule allows all traffic on port 80 only to a
// specific endpoint. When these rules are merged, it equates to allowing
// all traffic from port 80 from any endpoint.
//
// TODO: This comment can't be correct: the resulting policy
// should allow all on port 80 only from endpoint C; traffic
// from all other endpoints should still allow only GET
// on "/".
td := newTestData().withIDs(ruleTestIDs)
repo := td.repo
repo.MustAddList(api.Rules{&api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
},
}})
pol1, err := repo.resolvePolicyLocked(idA)
require.NoError(t, err)
defer pol1.Detach()
l4IngressPolicy := pol1.L4Policy.Ingress.PortRules
filter := l4IngressPolicy.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.True(t, filter.Ingress)
require.Len(t, filter.PerSelectorPolicies, 2)
require.NotNil(t, filter.PerSelectorPolicies[td.wildcardCachedSelector])
require.Nil(t, filter.PerSelectorPolicies[td.cachedSelectorC])
require.Equal(t, ParserTypeHTTP, filter.L7Parser)
require.Len(t, filter.PerSelectorPolicies, 2)
expected := &L4Filter{
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: "http",
PerSelectorPolicies: L7DataMap{
td.cachedSelectorC: nil,
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorC: {nil},
td.wildcardCachedSelector: {nil},
}),
}
require.True(t, expected.Equals(filter))
repo = td.resetRepo()
repo.MustAddList(api.Rules{&api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
}})
pol2, err := repo.resolvePolicyLocked(idA)
require.NoError(t, err)
defer pol2.Detach()
l4IngressPolicy = pol2.L4Policy.Ingress.PortRules
filter = l4IngressPolicy.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.True(t, filter.Ingress)
require.Equal(t, ParserTypeHTTP, filter.L7Parser)
require.Len(t, filter.PerSelectorPolicies, 2)
require.NotNil(t, filter.PerSelectorPolicies[td.wildcardCachedSelector])
require.Nil(t, filter.PerSelectorPolicies[td.cachedSelectorC])
expected = &L4Filter{
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
wildcard: td.wildcardCachedSelector,
L7Parser: "http",
PerSelectorPolicies: L7DataMap{
td.cachedSelectorC: nil,
td.wildcardCachedSelector: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Path: "/", Method: "GET"}},
},
isRedirect: true,
},
},
Ingress: true,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorC: {nil},
td.wildcardCachedSelector: {nil},
}),
}
require.True(t, expected.Equals(filter))
}
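// TestMatches verifies that endpoint and host rules match the correct subject
// identities, including after the mutable host identity changes its labels.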
func TestMatches(t *testing.T) {
td := newTestData()
repo := td.repo
repo.MustAddList(api.Rules{
&api.Rule{
EndpointSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
},
},
},
&api.Rule{
NodeSelector: endpointSelectorA,
Ingress: []api.IngressRule{
{
IngressCommonRule: api.IngressCommonRule{
FromEndpoints: []api.EndpointSelector{endpointSelectorC},
},
},
},
},
})
epRule := repo.rules[ruleKey{idx: 0}]
hostRule := repo.rules[ruleKey{idx: 1}]
selectedEpLabels := labels.ParseSelectLabel("id=a")
selectedIdentity := identity.NewIdentity(54321, labels.Labels{selectedEpLabels.Key: selectedEpLabels})
td.addIdentity(selectedIdentity)
notSelectedEpLabels := labels.ParseSelectLabel("id=b")
notSelectedIdentity := identity.NewIdentity(9876, labels.Labels{notSelectedEpLabels.Key: notSelectedEpLabels})
td.addIdentity(notSelectedIdentity)
hostLabels := labels.Labels{selectedEpLabels.Key: selectedEpLabels}
hostLabels.MergeLabels(labels.LabelHost)
hostIdentity := identity.NewIdentity(identity.ReservedIdentityHost, hostLabels)
td.addIdentity(hostIdentity)
// notSelectedEndpoint is not selected by the rule, so it shouldn't be added
// to EndpointsSelected.
require.False(t, epRule.matchesSubject(notSelectedIdentity))
// selectedEndpoint is selected by the rule, so it should be added to
// EndpointsSelected.
require.True(t, epRule.matchesSubject(selectedIdentity))
// Test again to check for caching working correctly.
require.True(t, epRule.matchesSubject(selectedIdentity))
// Possible scenario where an endpoint is deleted, and soon after another
// endpoint is added with the same ID, but with a different identity. Matching
// needs to handle this case correctly.
require.False(t, epRule.matchesSubject(notSelectedIdentity))
// host endpoint is not selected by the endpoint rule, so it shouldn't be added to EndpointsSelected.
require.False(t, epRule.matchesSubject(hostIdentity))
// selectedEndpoint is not selected by the host rule, so it shouldn't be added to EndpointsSelected.
require.False(t, hostRule.matchesSubject(selectedIdentity))
// host endpoint is selected by rule, but host labels are mutable, so don't cache them
require.True(t, hostRule.matchesSubject(hostIdentity))
// Assert that mutable host identities are handled
// First, add an additional label, ensure that match succeeds
hostLabels.MergeLabels(labels.NewLabelsFromModel([]string{"foo=bar"}))
hostIdentity = identity.NewIdentity(identity.ReservedIdentityHost, hostLabels)
td.addIdentity(hostIdentity)
require.True(t, hostRule.matchesSubject(hostIdentity))
// Then, change host to id=c, which is not selected, and ensure match is correct
hostIdentity = identity.NewIdentity(identity.ReservedIdentityHost, labels.NewLabelsFromModel([]string{"id=c"}))
td.addIdentity(hostIdentity)
require.False(t, hostRule.matchesSubject(hostIdentity))
}
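// BenchmarkRuleString measures the cost of rendering a rule to its string
// representation.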
func BenchmarkRuleString(b *testing.B) {
r := &rule{
Rule: api.Rule{
EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("bar")),
Ingress: []api.IngressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
{Port: "8080", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET", Path: "/"},
},
},
}},
},
},
Egress: []api.EgressRule{
{
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "3000", Protocol: api.ProtoAny},
},
}},
},
},
},
}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = r.String()
}
}
// Test merging of L7 rules when the same rules apply to multiple selectors.
// This was added to prevent regression of a bug where the merging of l7 rules for "foo"
// also affected the rules for "baz".
func TestMergeL7PolicyEgressWithMultipleSelectors(t *testing.T) {
td := newTestData().withIDs(ruleTestIDs)
td.repo.MustAddList(api.Rules{
{
EndpointSelector: endpointSelectorA,
Egress: []api.EgressRule{
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorB},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Method: "GET"},
},
},
}},
},
{
EgressCommonRule: api.EgressCommonRule{
ToEndpoints: []api.EndpointSelector{endpointSelectorC},
},
ToPorts: []api.PortRule{{
Ports: []api.PortProtocol{
{Port: "80", Protocol: api.ProtoTCP},
},
Rules: &api.L7Rules{
HTTP: []api.PortRuleHTTP{
{Host: "foo"},
},
},
}},
},
},
},
})
expected := &L4Filter{
Port: 80, Protocol: api.ProtoTCP, U8Proto: 6,
L7Parser: ParserTypeHTTP,
PerSelectorPolicies: L7DataMap{
td.cachedSelectorB: nil,
td.cachedSelectorC: &PerSelectorPolicy{
L7Rules: api.L7Rules{
HTTP: []api.PortRuleHTTP{{Method: "GET"}, {Host: "foo"}},
},
isRedirect: true,
},
},
Ingress: false,
RuleOrigin: OriginForTest(map[CachedSelector]labels.LabelArrayList{
td.cachedSelectorB: {nil},
td.cachedSelectorC: {nil},
}),
}
pol, err := td.repo.resolvePolicyLocked(idA)
require.NoError(t, err)
defer pol.Detach()
filter := pol.L4Policy.Egress.PortRules.ExactLookup("80", 0, "TCP")
require.NotNil(t, filter)
require.Equal(t, uint16(80), filter.Port)
require.False(t, filter.Ingress)
require.True(t, expected.Equals(filter), "expected %s actual %s", expected.String(), filter.String())
}
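// TestMergeListenerReference verifies the precedence and conflict rules applied
// when merging listener references between per-selector policies.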
func TestMergeListenerReference(t *testing.T) {
// No listener remains a no listener
ps := &PerSelectorPolicy{}
err := ps.mergeListenerReference(ps)
require.NoError(t, err)
require.Equal(t, "", ps.Listener)
require.Equal(t, uint8(0), ps.Priority)
// Listener reference remains when the other has none
ps0 := &PerSelectorPolicy{Listener: "listener0"}
err = ps0.mergeListenerReference(ps)
require.NoError(t, err)
require.Equal(t, "listener0", ps0.Listener)
require.Equal(t, uint8(0), ps0.Priority)
// Listener reference is propagated when there is none to begin with
err = ps.mergeListenerReference(ps0)
require.NoError(t, err)
require.Equal(t, "listener0", ps.Listener)
require.Equal(t, uint8(0), ps.Priority)
// A listener is not changed when there is no change
err = ps0.mergeListenerReference(ps0)
require.NoError(t, err)
require.Equal(t, "listener0", ps0.Listener)
require.Equal(t, uint8(0), ps0.Priority)
// Cannot merge two different listeners with the default (zero) priority
ps0a := &PerSelectorPolicy{Listener: "listener0a"}
err = ps0.mergeListenerReference(ps0a)
require.Error(t, err)
err = ps0a.mergeListenerReference(ps0)
require.Error(t, err)
// Listener with a defined (non-zero) priority takes precedence over
// a listener with an undefined (zero) priority
ps1 := &PerSelectorPolicy{Listener: "listener1", Priority: 1}
err = ps1.mergeListenerReference(ps0)
require.NoError(t, err)
require.Equal(t, "listener1", ps1.Listener)
require.Equal(t, uint8(1), ps1.Priority)
err = ps0.mergeListenerReference(ps1)
require.NoError(t, err)
require.Equal(t, "listener1", ps0.Listener)
require.Equal(t, uint8(1), ps0.Priority)
// Listener with the lower priority value takes precedence
ps2 := &PerSelectorPolicy{Listener: "listener2", Priority: 2}
err = ps1.mergeListenerReference(ps2)
require.NoError(t, err)
require.Equal(t, "listener1", ps1.Listener)
require.Equal(t, uint8(1), ps1.Priority)
err = ps2.mergeListenerReference(ps1)
require.NoError(t, err)
require.Equal(t, "listener1", ps2.Listener)
require.Equal(t, uint8(1), ps2.Priority)
// Cannot merge two different listeners with the same priority
ps12 := &PerSelectorPolicy{Listener: "listener1", Priority: 2}
ps2 = &PerSelectorPolicy{Listener: "listener2", Priority: 2}
err = ps12.mergeListenerReference(ps2)
require.Error(t, err)
err = ps2.mergeListenerReference(ps12)
require.Error(t, err)
// Lower priority is propagated also when the listeners are the same
ps23 := &PerSelectorPolicy{Listener: "listener2", Priority: 3}
err = ps2.mergeListenerReference(ps23)
require.NoError(t, err)
require.Equal(t, "listener2", ps2.Listener)
require.Equal(t, uint8(2), ps2.Priority)
err = ps23.mergeListenerReference(ps2)
require.NoError(t, err)
require.Equal(t, "listener2", ps23.Listener)
require.Equal(t, uint8(2), ps23.Priority)
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
policyapi "github.com/cilium/cilium/pkg/policy/api"
)
// ruleSlice is a wrapper around a slice of *rule, which allows for functions
// to be written with []*rule as a receiver.
type ruleSlice []*rule
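// resolveL4IngressPolicy computes the L4 ingress policy map for the endpoint
// described by ctx.To from the rules that select it, folding any FromRequires
// requirements of the matching rules into the evaluation.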
func (rules ruleSlice) resolveL4IngressPolicy(policyCtx PolicyContext, ctx *SearchContext) (L4PolicyMap, error) {
result := NewL4PolicyMap()
ctx.PolicyTrace("\n")
ctx.PolicyTrace("Resolving ingress policy for %+v\n", ctx.To)
state := traceState{}
var matchedRules ruleSlice
var requirements, requirementsDeny []slim_metav1.LabelSelectorRequirement
// Iterate over all FromRequires which select ctx.To. These requirements
// will be appended to each EndpointSelector's MatchExpressions in
// each FromEndpoints for all ingress rules. This ensures that FromRequires
// is taken into account when evaluating policy at L4.
for _, r := range rules {
if ctx.rulesSelect || r.getSelector().Matches(ctx.To) {
matchedRules = append(matchedRules, r)
for _, ingressRule := range r.Ingress {
for _, requirement := range ingressRule.FromRequires {
requirements = append(requirements, requirement.ConvertToLabelSelectorRequirementSlice()...)
}
}
for _, ingressRule := range r.IngressDeny {
for _, requirement := range ingressRule.FromRequires {
requirementsDeny = append(requirementsDeny, requirement.ConvertToLabelSelectorRequirementSlice()...)
}
}
}
}
// Only dealing with matching rules from now on. Mark it in the ctx
oldRulesSelect := ctx.rulesSelect
ctx.rulesSelect = true
for _, r := range matchedRules {
_, err := r.resolveIngressPolicy(policyCtx, ctx, &state, result, requirements, requirementsDeny)
if err != nil {
return nil, err
}
state.ruleID++
}
state.trace(len(rules), ctx)
// Restore ctx in case caller uses it again.
ctx.rulesSelect = oldRulesSelect
return result, nil
}
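// resolveL4EgressPolicy computes the L4 egress policy map for the endpoint
// described by ctx.From from the rules that select it, folding any ToRequires
// requirements of the matching rules into the evaluation.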
func (rules ruleSlice) resolveL4EgressPolicy(policyCtx PolicyContext, ctx *SearchContext) (L4PolicyMap, error) {
result := NewL4PolicyMap()
ctx.PolicyTrace("\n")
ctx.PolicyTrace("Resolving egress policy for %+v\n", ctx.From)
state := traceState{}
var matchedRules ruleSlice
var requirements, requirementsDeny []slim_metav1.LabelSelectorRequirement
// Iterate over all ToRequires which select ctx.To. These requirements will
// be appended to each EndpointSelector's MatchExpressions in each
// ToEndpoints for all egress rules. This ensures that ToRequires is
// taken into account when evaluating policy at L4.
for _, r := range rules {
if ctx.rulesSelect || r.getSelector().Matches(ctx.From) {
matchedRules = append(matchedRules, r)
for _, egressRule := range r.Egress {
for _, requirement := range egressRule.ToRequires {
requirements = append(requirements, requirement.ConvertToLabelSelectorRequirementSlice()...)
}
}
for _, egressRule := range r.EgressDeny {
for _, requirement := range egressRule.ToRequires {
requirementsDeny = append(requirementsDeny, requirement.ConvertToLabelSelectorRequirementSlice()...)
}
}
}
}
// Only dealing with matching rules from now on. Mark it in the ctx
oldRulesSelect := ctx.rulesSelect
ctx.rulesSelect = true
for i, r := range matchedRules {
state.ruleID = i
_, err := r.resolveEgressPolicy(policyCtx, ctx, &state, result, requirements, requirementsDeny)
if err != nil {
return nil, err
}
state.ruleID++
}
state.trace(len(rules), ctx)
// Restore ctx in case caller uses it again.
ctx.rulesSelect = oldRulesSelect
return result, nil
}
// AsPolicyRules returns the internal policyapi.Rule objects as a policyapi.Rules object
func (rules ruleSlice) AsPolicyRules() policyapi.Rules {
policyRules := make(policyapi.Rules, 0, len(rules))
for _, r := range rules {
policyRules = append(policyRules, &r.Rule)
}
return policyRules
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"sync"
"github.com/sirupsen/logrus"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/container/versioned"
"github.com/cilium/cilium/pkg/identity"
k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/policy/types"
)
// scIdentity is the information we need about an identity that rules can select
type scIdentity struct {
NID identity.NumericIdentity
lbls labels.LabelArray
namespace string // value of the namespace label, or ""
}
// scIdentityCache is a cache of Identities keyed by the numeric identity
type scIdentityCache map[identity.NumericIdentity]scIdentity
func newIdentity(nid identity.NumericIdentity, lbls labels.LabelArray) scIdentity {
return scIdentity{
NID: nid,
lbls: lbls,
namespace: lbls.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel),
}
}
// userNotification stores the information needed to call
// IdentitySelectionUpdated callbacks to notify users of a selector's
// identity changes. Notifications are queued so that the callbacks can be
// called in FIFO order without holding any locks.
type userNotification struct {
user CachedSelectionUser
selector CachedSelector // nil for a sync notification
txn *versioned.Tx // nil for non-sync notifications
added []identity.NumericIdentity
deleted []identity.NumericIdentity
wg *sync.WaitGroup
}
// SelectorCache caches identities, identity selectors, and the
// subsets of identities each selector selects.
type SelectorCache struct {
versioned *versioned.Coordinator
mutex lock.RWMutex
// selectorUpdates tracks changed selectors for efficient cleanup of old versions
selectorUpdates versioned.VersionedSlice[*identitySelector]
// idCache contains all known identities as informed by the
// kv-store and the local identity facility via our
// UpdateIdentities() function.
idCache scIdentityCache
// map key is the string representation of the selector being cached.
selectors map[string]*identitySelector
localIdentityNotifier identityNotifier
// userCond is a condition variable for receiving signals
// about addition of new elements in userNotes
userCond *sync.Cond
// userMutex protects userNotes and is linked to userCond
userMutex lock.Mutex
// userNotes holds a FIFO list of user notifications to be made
userNotes []userNotification
// notifiedUsers is a set of all notified users
notifiedUsers map[CachedSelectionUser]struct{}
// used to lazily start the handler for user notifications.
startNotificationsHandlerOnce sync.Once
}
// GetVersionHandleFunc calls the given function with a versioned.VersionHandle for the
// current version of SelectorCache selections while the selector cache is locked for
// writing, so that the caller can prepare to receive incremental updates that may arrive
// right after the lock is released.
// This should only be used with trivial functions that cannot lock or sleep.
// Use the plain 'GetVersionHandle' whenever possible, as it does not lock the selector cache.
// The VersionHandle passed to 'f' must be closed with Close().
func (sc *SelectorCache) GetVersionHandleFunc(f func(*versioned.VersionHandle)) {
// Lock synchronizes with UpdateIdentities() so that we do not use a stale version
// that may already have received partial incremental updates.
// Incremental updates are delivered asynchronously, so the caller may still receive
// updates for older versions. These should be filtered out.
sc.mutex.Lock()
defer sc.mutex.Unlock()
f(sc.GetVersionHandle())
}
// GetVersionHandle returns a VersionHandle for the current version.
// The returned VersionHandle must be closed with Close()
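// A minimal usage sketch (illustrative only):
//
//	v := sc.GetVersionHandle()
//	defer v.Close()
//	// ... read selections consistent with version v ...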
func (sc *SelectorCache) GetVersionHandle() *versioned.VersionHandle {
return sc.versioned.GetVersionHandle()
}
// GetModel returns the API model of the SelectorCache.
func (sc *SelectorCache) GetModel() models.SelectorCache {
sc.mutex.RLock()
defer sc.mutex.RUnlock()
selCacheMdl := make(models.SelectorCache, 0, len(sc.selectors))
// Get handle to the current version. Any concurrent updates will not be visible in the
// returned model.
version := sc.GetVersionHandle()
defer version.Close()
for selector, idSel := range sc.selectors {
selections := idSel.GetSelections(version)
ids := make([]int64, 0, len(selections))
for i := range selections {
ids = append(ids, int64(selections[i]))
}
selMdl := &models.SelectorIdentityMapping{
Selector: selector,
Identities: ids,
Users: int64(idSel.numUsers()),
Labels: labelArrayToModel(idSel.GetMetadataLabels()),
}
selCacheMdl = append(selCacheMdl, selMdl)
}
return selCacheMdl
}
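// Stats returns, per selector class, the maximum number of identities selected
// by any selector that may select peers; subject-only selectors are skipped.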
func (sc *SelectorCache) Stats() selectorStats {
result := newSelectorStats()
sc.mutex.RLock()
defer sc.mutex.RUnlock()
version := sc.GetVersionHandle()
defer version.Close()
for _, idSel := range sc.selectors {
if !idSel.MaySelectPeers() {
// Peer selectors impact policymap cardinality, but
// subject selectors do not. Do not count cardinality
// if the selector is only used for policy subjects.
continue
}
selections := idSel.GetSelections(version)
class := idSel.source.metricsClass()
if result.maxCardinalityByClass[class] < len(selections) {
result.maxCardinalityByClass[class] = len(selections)
}
}
return result
}
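// labelArrayToModel converts a labels.LabelArray into its API model
// representation.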
func labelArrayToModel(arr labels.LabelArray) models.LabelArray {
lbls := make(models.LabelArray, 0, len(arr))
for _, l := range arr {
lbls = append(lbls, &models.Label{
Key: l.Key,
Value: l.Value,
Source: l.Source,
})
}
return lbls
}
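// handleUserNotifications runs in its own goroutine, draining queued user
// notifications and invoking the corresponding callbacks in FIFO order without
// holding the selector cache lock.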
func (sc *SelectorCache) handleUserNotifications() {
for {
sc.userMutex.Lock()
for len(sc.userNotes) == 0 {
sc.userCond.Wait()
}
// get the current batch of notifications and release the lock so that SelectorCache
// can't block on userMutex while we call IdentitySelectionUpdated callbacks below.
notifications := sc.userNotes
sc.userNotes = nil
sc.userMutex.Unlock()
for _, n := range notifications {
if n.selector == nil {
n.user.IdentitySelectionCommit(n.txn)
} else {
n.user.IdentitySelectionUpdated(n.selector, n.added, n.deleted)
}
n.wg.Done()
}
}
}
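// queueUserNotification enqueues an identity-selection update for the given
// user and signals the notification handler, starting the handler on first use.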
func (sc *SelectorCache) queueUserNotification(user CachedSelectionUser, selector CachedSelector, added, deleted []identity.NumericIdentity, wg *sync.WaitGroup) {
sc.startNotificationsHandlerOnce.Do(func() {
go sc.handleUserNotifications()
})
wg.Add(1)
sc.userMutex.Lock()
if sc.notifiedUsers == nil {
sc.notifiedUsers = make(map[CachedSelectionUser]struct{})
}
sc.notifiedUsers[user] = struct{}{}
sc.userNotes = append(sc.userNotes, userNotification{
user: user,
selector: selector,
added: added,
deleted: deleted,
wg: wg,
})
sc.userMutex.Unlock()
sc.userCond.Signal()
}
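// queueNotifiedUsersCommit queues a sync (commit) notification for every user
// notified so far and clears the set of notified users.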
func (sc *SelectorCache) queueNotifiedUsersCommit(txn *versioned.Tx, wg *sync.WaitGroup) {
sc.userMutex.Lock()
for user := range sc.notifiedUsers {
wg.Add(1)
// sync notification has a nil selector
sc.userNotes = append(sc.userNotes, userNotification{
user: user,
txn: txn,
wg: wg,
})
}
sc.notifiedUsers = nil
sc.userMutex.Unlock()
sc.userCond.Signal()
}
// NewSelectorCache creates a new SelectorCache with the given identities.
func NewSelectorCache(ids identity.IdentityMap) *SelectorCache {
sc := &SelectorCache{
idCache: make(map[identity.NumericIdentity]scIdentity, len(ids)),
selectors: make(map[string]*identitySelector),
}
sc.userCond = sync.NewCond(&sc.userMutex)
sc.versioned = &versioned.Coordinator{
Cleaner: sc.oldVersionCleaner,
Logger: log,
}
for nid, lbls := range ids {
sc.idCache[nid] = newIdentity(nid, lbls)
}
return sc
}
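// RegisterMetrics registers the selector cache metrics, logging a warning if
// registration fails.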
func (sc *SelectorCache) RegisterMetrics() {
if err := metrics.Register(newSelectorCacheMetrics(sc)); err != nil {
log.WithError(err).Warning("Selector cache metrics registration failed. No metrics will be reported.")
}
}
// oldVersionCleaner is called from a goroutine without holding any locks
func (sc *SelectorCache) oldVersionCleaner(keepVersion versioned.KeepVersion) {
// Log before taking the lock so that if we ever have a deadlock here this log line will be seen
log.WithField(logfields.Version, keepVersion).Debug("Cleaning old selector and identity versions")
// This is called when some versions are no longer needed, from wherever
// VersionHandles may be kept, so we must take the lock to safely access
// 'sc.selectorUpdates'.
sc.mutex.Lock()
defer sc.mutex.Unlock()
n := 0
for idSel := range sc.selectorUpdates.Before(keepVersion) {
idSel.selections.RemoveBefore(keepVersion)
n++
}
sc.selectorUpdates = sc.selectorUpdates[n:]
}
// SetLocalIdentityNotifier injects the provided identityNotifier into the
// SelectorCache. Currently, this is used to inject the FQDN subsystem into
// the SelectorCache so the SelectorCache can notify the FQDN subsystem when
// it should be aware of a given FQDNSelector for which CIDR identities need
// to be provided upon DNS lookups which correspond to said FQDNSelector.
func (sc *SelectorCache) SetLocalIdentityNotifier(pop identityNotifier) {
sc.localIdentityNotifier = pop
}
var (
// wildcardSelectorKey is used to compare if a key is for a wildcard
wildcardSelectorKey = api.WildcardEndpointSelector.LabelSelector.String()
// noneSelectorKey is used to compare if a key is for "reserved:none"
noneSelectorKey = api.EndpointSelectorNone.LabelSelector.String()
)
// identityNotifier provides a means for other subsystems to be made aware of a
// given FQDNSelector (currently pkg/fqdn) so that said subsystems can notify
// the IPCache about IPs which correspond to said FQDNSelector.
// This is necessary as there is nothing intrinsic about an IP that says that
// it corresponds to a given FQDNSelector; this relationship is contained only
// via DNS responses, which are handled externally.
type identityNotifier interface {
// RegisterFQDNSelector exposes this FQDNSelector so that the identity labels
// of IPs contained in a DNS response that matches said selector can be
// associated with that selector.
RegisterFQDNSelector(selector api.FQDNSelector)
// UnregisterFQDNSelector removes this FQDNSelector from the set of
// IPs which are being tracked by the identityNotifier. The result
// of this is that an IP may be evicted from IPCache if it is no longer
// selected by any other FQDN selector.
// This occurs when there are no more users of a given FQDNSelector for the
// SelectorCache.
UnregisterFQDNSelector(selector api.FQDNSelector)
}
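// A minimal sketch (not part of the upstream code): a no-op identityNotifier
// satisfies this interface for callers that do not use FQDN-based policy,
// similar in spirit to the dummy notifier used by the tests. The type name is
// hypothetical.
type noopIdentityNotifier struct{}

func (noopIdentityNotifier) RegisterFQDNSelector(api.FQDNSelector)   {}
func (noopIdentityNotifier) UnregisterFQDNSelector(api.FQDNSelector) {}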
// AddFQDNSelector adds the given api.FQDNSelector into the selector cache. If
// an identical EndpointSelector has already been cached, the corresponding
// types.CachedSelector is returned, otherwise one is created and added to the cache.
func (sc *SelectorCache) AddFQDNSelector(user CachedSelectionUser, lbls stringLabels, fqdnSelec api.FQDNSelector) (cachedSelector types.CachedSelector, added bool) {
key := fqdnSelec.String()
sc.mutex.Lock()
defer sc.mutex.Unlock()
// If the selector already exists, use it.
idSel, exists := sc.selectors[key]
if exists {
return idSel, idSel.addUser(user)
}
source := &fqdnSelector{
selector: fqdnSelec,
}
// Make the FQDN subsystem aware of this selector
sc.localIdentityNotifier.RegisterFQDNSelector(source.selector)
return sc.addSelectorLocked(user, lbls, key, source)
}
// must hold lock for writing
func (sc *SelectorCache) addSelectorLocked(user CachedSelectionUser, lbls stringLabels, key string, source selectorSource) (types.CachedSelector, bool) {
idSel := &identitySelector{
key: key,
users: make(map[CachedSelectionUser]struct{}),
cachedSelections: make(map[identity.NumericIdentity]struct{}),
source: source,
metadataLbls: lbls,
}
sc.selectors[key] = idSel
// Scan the cached set of IDs to determine any new matchers
for nid, identity := range sc.idCache {
if idSel.source.matches(identity) {
idSel.cachedSelections[nid] = struct{}{}
}
}
// Note: No notifications are sent for the existing
// identities. Caller must use GetSelections() to get the
// current selections after adding a selector. This way the
// behavior is the same between the two cases here (selector
// is already cached, or is a new one).
// Create the immutable slice representation of the selected
// numeric identities
txn := sc.versioned.PrepareNextVersion()
idSel.updateSelections(txn)
txn.Commit()
return idSel, idSel.addUser(user)
}
// FindCachedIdentitySelector finds the given api.EndpointSelector in the
// selector cache, returning nil if one cannot be found.
func (sc *SelectorCache) FindCachedIdentitySelector(selector api.EndpointSelector) types.CachedSelector {
key := selector.CachedString()
sc.mutex.RLock()
idSel := sc.selectors[key]
sc.mutex.RUnlock()
return idSel
}
// AddIdentitySelector adds the given api.EndpointSelector into the
// selector cache. If an identical EndpointSelector has already been
// cached, the corresponding types.CachedSelector is returned, otherwise one
// is created and added to the cache.
func (sc *SelectorCache) AddIdentitySelector(user types.CachedSelectionUser, lbls stringLabels, selector api.EndpointSelector) (cachedSelector types.CachedSelector, added bool) {
// The key returned here may be different for equivalent
// labelselectors, if the selector's requirements are stored
// in different orders. When this happens we'll be tracking
// essentially two copies of the same selector.
key := selector.CachedString()
sc.mutex.Lock()
defer sc.mutex.Unlock()
idSel, exists := sc.selectors[key]
if exists {
return idSel, idSel.addUser(user)
}
// Selectors are never modified once a rule is placed in the policy repository,
// so no need to deep copy.
source := &labelIdentitySelector{
selector: selector,
}
// check if the selector has a namespace match or requirement
if namespaces, ok := selector.GetMatch(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel); ok {
source.namespaces = namespaces
}
return sc.addSelectorLocked(user, lbls, key, source)
}
// lock must be held
func (sc *SelectorCache) removeSelectorLocked(selector types.CachedSelector, user CachedSelectionUser) {
key := selector.String()
sel, exists := sc.selectors[key]
if exists {
if sel.removeUser(user) {
sel.source.remove(sc.localIdentityNotifier)
delete(sc.selectors, key)
}
}
}
// RemoveSelector removes types.CachedSelector for the user.
func (sc *SelectorCache) RemoveSelector(selector types.CachedSelector, user CachedSelectionUser) {
sc.mutex.Lock()
sc.removeSelectorLocked(selector, user)
sc.mutex.Unlock()
}
// RemoveSelectors removes types.CachedSelectorSlice for the user.
func (sc *SelectorCache) RemoveSelectors(selectors types.CachedSelectorSlice, user CachedSelectionUser) {
sc.mutex.Lock()
for _, selector := range selectors {
sc.removeSelectorLocked(selector, user)
}
sc.mutex.Unlock()
}
// ChangeUser changes the CachedSelectionUser that gets updates on the
// cached selector.
func (sc *SelectorCache) ChangeUser(selector types.CachedSelector, from, to CachedSelectionUser) {
key := selector.String()
sc.mutex.Lock()
idSel, exists := sc.selectors[key]
if exists {
// Add before remove so that the count does not dip to zero in between,
// as this causes FQDN unregistration (if applicable).
idSel.addUser(to)
// ignoring the return value as we have just added a user above
idSel.removeUser(from)
}
sc.mutex.Unlock()
}
// CanSkipUpdate returns true if a proposed update is already known to the SelectorCache
// and thus is a no-op. It is used to de-dup an ID update stream, because identical updates
// may come from multiple sources.
func (sc *SelectorCache) CanSkipUpdate(added, deleted identity.IdentityMap) bool {
sc.mutex.RLock()
defer sc.mutex.RUnlock()
for nid := range deleted {
if _, exists := sc.idCache[nid]; exists {
return false
}
}
for nid, lbls := range added {
haslbls, exists := sc.idCache[nid]
if !exists { // id not known to us: cannot skip
return false
}
if !haslbls.lbls.Equals(lbls) {
// labels are not equal: cannot skip
return false
}
}
return true
}
// UpdateIdentities propagates identity updates to selectors
//
// The caller is responsible for making sure the same identity is not
// present in both 'added' and 'deleted'.
//
// Caller should Wait() on the returned sync.WaitGroup before triggering any
// policy updates. Policy updates may need Endpoint locks, so this Wait() can
// deadlock if the caller is holding any endpoint locks.
//
// Incremental deletes of mutated identities are not sent to the users, as that could
// lead to deletion of policy map entries while other selectors may still select the mutated
// identity.
// In this case the return value is 'true' and the caller should trigger policy updates on all
// endpoints to remove the affected identity only from selectors that no longer select the mutated
// identity.
func (sc *SelectorCache) UpdateIdentities(added, deleted identity.IdentityMap, wg *sync.WaitGroup) (mutated bool) {
sc.mutex.Lock()
defer sc.mutex.Unlock()
txn := sc.versioned.PrepareNextVersion()
// Update idCache so that newly added selectors get
// prepopulated with all matching numeric identities.
for numericID := range deleted {
if old, exists := sc.idCache[numericID]; exists {
log.WithFields(logrus.Fields{
logfields.NewVersion: txn,
logfields.Identity: numericID,
logfields.Labels: old.lbls,
}).Debug("UpdateIdentities: Deleting identity")
delete(sc.idCache, numericID)
} else {
log.WithFields(logrus.Fields{
logfields.NewVersion: txn,
logfields.Identity: numericID,
}).Warning("UpdateIdentities: Skipping Delete of a non-existing identity")
delete(deleted, numericID)
}
}
for numericID, lbls := range added {
if old, exists := sc.idCache[numericID]; exists {
// Skip if no change. Not skipping if label
// order is different, but identity labels are
// sorted for the kv-store, so there should
// not be too many false negatives.
if lbls.Equals(old.lbls) {
log.WithFields(logrus.Fields{
logfields.NewVersion: txn,
logfields.Identity: numericID,
}).Debug("UpdateIdentities: Skipping add of an existing identical identity")
delete(added, numericID)
continue
}
scopedLog := log.WithFields(logrus.Fields{
logfields.NewVersion: txn,
logfields.Identity: numericID,
logfields.Labels: old.lbls,
logfields.Labels + "(new)": lbls},
)
msg := "UpdateIdentities: Updating an existing identity"
// Warn if any ID other than the local host has its labels changed.
// The local host can have its labels change at runtime if
// the kube-apiserver is running on the local host, see
// ipcache.TriggerLabelInjection().
if numericID == identity.ReservedIdentityHost {
scopedLog.Debug(msg)
} else {
scopedLog.Warning(msg)
}
} else {
log.WithFields(logrus.Fields{
logfields.NewVersion: txn,
logfields.Identity: numericID,
logfields.Labels: lbls,
}).Debug("UpdateIdentities: Adding a new identity")
}
sc.idCache[numericID] = newIdentity(numericID, lbls)
}
updated := false
if len(deleted)+len(added) > 0 {
// Iterate through all locally used identity selectors and
// update the cached numeric identities as required.
for _, idSel := range sc.selectors {
var adds, dels []identity.NumericIdentity
for numericID := range deleted {
if _, exists := idSel.cachedSelections[numericID]; exists {
dels = append(dels, numericID)
delete(idSel.cachedSelections, numericID)
}
}
for numericID := range added {
matches := idSel.source.matches(sc.idCache[numericID])
_, exists := idSel.cachedSelections[numericID]
if matches && !exists {
adds = append(adds, numericID)
idSel.cachedSelections[numericID] = struct{}{}
} else if !matches && exists {
// Identity was mutated and no longer matches, the identity
// is deleted from the cached selections, but is not sent to
// users as a deletion. Instead, we return 'mutated = true'
// telling the caller to trigger forced policy updates on
// all endpoints to recompute the policy as if the mutated
// identity was never selected by the affected selector.
mutated = true
delete(idSel.cachedSelections, numericID)
}
}
if len(dels)+len(adds) > 0 {
updated = true
sc.selectorUpdates = sc.selectorUpdates.Append(idSel, txn)
idSel.updateSelections(txn)
idSel.notifyUsers(sc, adds, dels, wg)
}
}
}
if updated {
// Launch a waiter that holds the new version as long as needed for users to have grabbed it
sc.queueNotifiedUsersCommit(txn, wg)
go func(version *versioned.VersionHandle) {
wg.Wait()
log.WithFields(logrus.Fields{
logfields.NewVersion: txn,
}).Debug("UpdateIdentities: Waited for incremental updates to have committed, closing handle on the new version.")
version.Close()
}(txn.GetVersionHandle())
txn.Commit()
}
return mutated
}
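// A minimal usage sketch (not part of the upstream code): callers of
// UpdateIdentities wait on the WaitGroup before triggering policy updates,
// and force a full recomputation when an existing identity was mutated.
// The function name is hypothetical.
func exampleUpdateIdentitiesUsage(sc *SelectorCache, added, deleted identity.IdentityMap) {
	wg := &sync.WaitGroup{}
	mutated := sc.UpdateIdentities(added, deleted, wg)
	// Must not hold any endpoint locks here, or Wait() may deadlock.
	wg.Wait()
	if mutated {
		// A real caller would trigger policy recomputation on all endpoints
		// here, so that the mutated identity is dropped from selectors that
		// no longer select it.
	}
}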
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"sort"
"sync"
"github.com/hashicorp/go-hclog"
"github.com/sirupsen/logrus"
"github.com/cilium/cilium/pkg/container/versioned"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/policy/types"
)
type CachedSelector types.CachedSelector
type CachedSelectorSlice types.CachedSelectorSlice
type CachedSelectionUser types.CachedSelectionUser
// identitySelector is the internal type for all selectors in the
// selector cache.
//
// identitySelector represents the mapping of an EndpointSelector
// to a slice of identities. These mappings are updated via two
// different processes:
//
// 1. When policy rules are changed these are added and/or deleted
// depending on what selectors the rules contain. Cached selections of
// new identitySelectors are pre-populated from the set of currently
// known identities.
//
// 2. When reachable identities appear or disappear, either via local
// allocation (CIDRs), or via the KV-store (remote endpoints). In this
// case all existing identitySelectors are walked through and their
// cached selections are updated as necessary.
//
// In both of the above cases the set of existing identitySelectors is
// write locked.
//
// To minimize the upkeep, identity selectors are shared across
// all IdentityPolicies, so that only one copy exists for each
// identitySelector. Users of the SelectorCache take care of creating
// identitySelectors as needed by identity policies. The set of
// identitySelectors is read locked during an IdentityPolicy update so
// that the policy is always updated using a coherent set of
// cached selections.
//
// identitySelector is used as a map key, so it must not be implemented by a
// map, slice, or a func, or a runtime panic will be triggered. In all
// cases below identitySelector is being implemented by structs.
//
// identitySelector is used in the policy engine as a map key,
// so it must always be given to the user as a pointer to the actual type.
// (The public methods only expose the CachedSelector interface.)
type identitySelector struct {
source selectorSource
key string
selections versioned.Value[identity.NumericIdentitySlice]
users map[CachedSelectionUser]struct{}
cachedSelections map[identity.NumericIdentity]struct{}
metadataLbls stringLabels
}
func (i *identitySelector) MaySelectPeers() bool {
for user := range i.users {
if user.IsPeerSelector() {
return true
}
}
return false
}
// identitySelector implements CachedSelector
var _ types.CachedSelector = (*identitySelector)(nil)
type selectorSource interface {
matches(scIdentity) bool
remove(identityNotifier)
metricsClass() string
}
// fqdnSelector implements the selectorSource for an FQDNSelector. An fqdnSelector
// matches an identity if the identity has a `fqdn:` label matching the FQDN
// selector string.
// In addition, the remove implementation calls back into the DNS name manager
// to unregister the FQDN selector.
type fqdnSelector struct {
selector api.FQDNSelector
}
func (f *fqdnSelector) remove(dnsProxy identityNotifier) {
dnsProxy.UnregisterFQDNSelector(f.selector)
}
// matches returns true if the identity contains at least one label
// that matches the FQDNSelector's IdentityLabel string
func (f *fqdnSelector) matches(identity scIdentity) bool {
return identity.lbls.Intersects(labels.LabelArray{f.selector.IdentityLabel()})
}
func (f *fqdnSelector) metricsClass() string {
return LabelValueSCFQDN
}
type labelIdentitySelector struct {
selector api.EndpointSelector
namespaces []string // allowed namespaces; empty means any namespace
}
// xxxMatches returns true if the CachedSelector matches given labels.
// This is slow, but only used for policy tracing, so it's OK.
func (l *labelIdentitySelector) xxxMatches(labels labels.LabelArray) bool {
return l.selector.Matches(labels)
}
func (l *labelIdentitySelector) matchesNamespace(ns string) bool {
if len(l.namespaces) > 0 {
if ns != "" {
for i := range l.namespaces {
if ns == l.namespaces[i] {
return true
}
}
}
// namespace required, but no match
return false
}
// no namespace required, match
return true
}
func (l *labelIdentitySelector) matches(identity scIdentity) bool {
return l.matchesNamespace(identity.namespace) && l.selector.Matches(identity.lbls)
}
func (l *labelIdentitySelector) remove(_ identityNotifier) {
// only useful for fqdn selectors
}
func (l *labelIdentitySelector) metricsClass() string {
if l.selector.DeepEqual(&api.EntitySelectorMapping[api.EntityCluster][0]) {
return LabelValueSCCluster
}
for _, entity := range api.EntitySelectorMapping[api.EntityWorld] {
if l.selector.DeepEqual(&entity) {
return LabelValueSCWorld
}
}
return LabelValueSCOther
}
// lock must be held
//
// The caller is responsible for making sure the same identity is not
// present in both 'added' and 'deleted'.
func (i *identitySelector) notifyUsers(sc *SelectorCache, added, deleted []identity.NumericIdentity, wg *sync.WaitGroup) {
for user := range i.users {
// pass this identitySelector to the user as a CachedSelector
sc.queueUserNotification(user, i, added, deleted, wg)
}
}
// Equal is used by checker.Equals, and only considers the identity of the selector,
// ignoring the internal state!
func (i *identitySelector) Equal(b *identitySelector) bool {
return i.key == b.key
}
//
// CachedSelector implementation (== Public API)
//
// No locking needed.
//
// GetSelections returns the set of numeric identities currently
// selected. The cached selections can be concurrently updated. In
// that case GetSelections() will return either the old or new version
// of the selections. If the old version is returned, the user is
// guaranteed to receive a notification including the update.
func (i *identitySelector) GetSelections(version *versioned.VersionHandle) identity.NumericIdentitySlice {
if !version.IsValid() {
log.WithFields(logrus.Fields{
logfields.Version: version,
logfields.Stacktrace: hclog.Stacktrace(),
}).Error("GetSelections: Invalid VersionHandle finds nothing")
return identity.NumericIdentitySlice{}
}
return i.selections.At(version)
}
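// A minimal usage sketch (not part of the upstream code): readers take a
// version handle from the SelectorCache, read a coherent snapshot of the
// selections, and close the handle so that old versions can be cleaned up.
// The function name is hypothetical.
func exampleGetSelectionsUsage(sc *SelectorCache, sel CachedSelector) identity.NumericIdentitySlice {
	version := sc.GetVersionHandle()
	defer version.Close()
	return sel.GetSelections(version)
}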
func (i *identitySelector) GetMetadataLabels() labels.LabelArray {
return labels.LabelArrayFromString(string(i.metadataLbls.Value()))
}
// Selects returns 'true' if the CachedSelector selects the given
// numeric identity.
func (i *identitySelector) Selects(version *versioned.VersionHandle, nid identity.NumericIdentity) bool {
if i.IsWildcard() {
return true
}
nids := i.GetSelections(version)
idx := sort.Search(len(nids), func(i int) bool { return nids[i] >= nid })
return idx < len(nids) && nids[idx] == nid
}
// IsWildcard returns true if the endpoint selector selects all
// endpoints.
func (i *identitySelector) IsWildcard() bool {
return i.key == wildcardSelectorKey
}
// IsNone returns true if the endpoint selector never selects anything.
func (i *identitySelector) IsNone() bool {
return i.key == noneSelectorKey
}
// String returns the map key for this selector
func (i *identitySelector) String() string {
return i.key
}
//
// identitySelector implementation (== internal API)
//
// lock must be held
func (i *identitySelector) addUser(user CachedSelectionUser) (added bool) {
if _, exists := i.users[user]; exists {
return false
}
i.users[user] = struct{}{}
return true
}
// locks must be held for the dnsProxy and the SelectorCache (if the selector is an FQDN selector)
func (i *identitySelector) removeUser(user CachedSelectionUser) (last bool) {
delete(i.users, user)
return len(i.users) == 0
}
// lock must be held
func (i *identitySelector) numUsers() int {
return len(i.users)
}
// updateSelections updates the immutable slice representation of the
// cached selections after the cached selections have been changed.
//
// lock must be held
func (i *identitySelector) updateSelections(nextVersion *versioned.Tx) {
selections := make(identity.NumericIdentitySlice, len(i.cachedSelections))
idx := 0
for nid := range i.cachedSelections {
selections[idx] = nid
idx++
}
// Sort the numeric identities so that the map iteration order
// does not matter. This makes testing easier, and can also make
// identifying changes easier elsewhere.
sort.Slice(selections, func(i, j int) bool {
return selections[i] < selections[j]
})
i.setSelections(selections, nextVersion)
}
func (i *identitySelector) setSelections(selections identity.NumericIdentitySlice, nextVersion *versioned.Tx) {
var err error
if len(selections) > 0 {
err = i.selections.SetAt(selections, nextVersion)
} else {
err = i.selections.RemoveAt(nextVersion)
}
if err != nil {
stacktrace := hclog.Stacktrace()
log.WithError(err).WithField(logfields.Stacktrace, stacktrace).Error("setSelections failed")
}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"net/netip"
"sync"
"testing"
"github.com/stretchr/testify/require"
"github.com/cilium/cilium/pkg/container/versioned"
"github.com/cilium/cilium/pkg/identity"
k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/policy/api"
policytypes "github.com/cilium/cilium/pkg/policy/types"
testidentity "github.com/cilium/cilium/pkg/testutils/identity"
)
type cachedSelectionUser struct {
t *testing.T
sc *SelectorCache
name string
updateMutex lock.Mutex
updateCond *sync.Cond
selections map[CachedSelector][]identity.NumericIdentity
notifications int
adds int
deletes int
}
func (sc *SelectorCache) haveUserNotifications() bool {
sc.userMutex.Lock()
defer sc.userMutex.Unlock()
return len(sc.userNotes) > 0
}
func newUser(t *testing.T, name string, sc *SelectorCache) *cachedSelectionUser {
csu := &cachedSelectionUser{
t: t,
sc: sc,
name: name,
selections: make(map[CachedSelector][]identity.NumericIdentity),
}
csu.updateCond = sync.NewCond(&csu.updateMutex)
return csu
}
func haveNid(nid identity.NumericIdentity, selections []identity.NumericIdentity) bool {
for i := range selections {
if selections[i] == nid {
return true
}
}
return false
}
func (csu *cachedSelectionUser) AddIdentitySelector(sel api.EndpointSelector) CachedSelector {
csu.updateMutex.Lock()
defer csu.updateMutex.Unlock()
cached, added := csu.sc.AddIdentitySelector(csu, EmptyStringLabels, sel)
require.NotNil(csu.t, cached)
_, exists := csu.selections[cached]
// Not added if already exists for this user
require.Equal(csu.t, !exists, added)
csu.selections[cached] = cached.GetSelections(versioned.Latest())
// Pre-existing selections are not notified as updates
require.False(csu.t, csu.sc.haveUserNotifications())
return cached
}
func (csu *cachedSelectionUser) AddFQDNSelector(sel api.FQDNSelector) CachedSelector {
csu.updateMutex.Lock()
defer csu.updateMutex.Unlock()
cached, added := csu.sc.AddFQDNSelector(csu, EmptyStringLabels, sel)
require.NotNil(csu.t, cached)
_, exists := csu.selections[cached]
// Not added if already exists for this user
require.Equal(csu.t, !exists, added)
csu.selections[cached] = cached.GetSelections(versioned.Latest())
// Pre-existing selections are not notified as updates
require.False(csu.t, csu.sc.haveUserNotifications())
return cached
}
func (csu *cachedSelectionUser) RemoveSelector(sel CachedSelector) {
csu.updateMutex.Lock()
defer csu.updateMutex.Unlock()
csu.sc.RemoveSelector(sel, csu)
delete(csu.selections, sel)
// No notifications for a removed selector
require.False(csu.t, csu.sc.haveUserNotifications())
}
func (csu *cachedSelectionUser) Reset() {
csu.updateMutex.Lock()
defer csu.updateMutex.Unlock()
csu.notifications = 0
}
func (csu *cachedSelectionUser) WaitForUpdate() (adds, deletes int) {
csu.updateMutex.Lock()
defer csu.updateMutex.Unlock()
for csu.notifications == 0 {
csu.updateCond.Wait()
}
return csu.adds, csu.deletes
}
func (csu *cachedSelectionUser) IdentitySelectionUpdated(selector policytypes.CachedSelector, added, deleted []identity.NumericIdentity) {
csu.updateMutex.Lock()
defer csu.updateMutex.Unlock()
csu.notifications++
csu.adds += len(added)
csu.deletes += len(deleted)
selections := selector.GetSelections(versioned.Latest())
// Validate added & deleted against the selections
for _, add := range added {
require.True(csu.t, haveNid(add, selections))
}
for _, del := range deleted {
require.False(csu.t, haveNid(del, selections))
}
// update selections
csu.selections[selector] = selections
}
func (csu *cachedSelectionUser) IdentitySelectionCommit(*versioned.Tx) {
csu.updateCond.Signal()
}
func (csu *cachedSelectionUser) IsPeerSelector() bool {
return true
}
// Mock CachedSelector for unit testing.
//
// testCachedSelector is used in isolation so there is no point in implementing versioning for it.
type testCachedSelector struct {
name string
wildcard bool
selections []identity.NumericIdentity
}
func newTestCachedSelector(name string, wildcard bool, selections ...int) *testCachedSelector {
cs := &testCachedSelector{
name: name,
wildcard: wildcard,
selections: make([]identity.NumericIdentity, 0, len(selections)),
}
cs.addSelections(selections...)
return cs
}
// addSelections adds the given selections and returns the added selections as []identity.NumericIdentity
func (cs *testCachedSelector) addSelections(selections ...int) (adds []identity.NumericIdentity) {
for _, id := range selections {
nid := identity.NumericIdentity(id)
adds = append(adds, nid)
if cs == nil {
continue
}
if !cs.Selects(versioned.Latest(), nid) {
cs.selections = append(cs.selections, nid)
}
}
return adds
}
// deleteSelections removes the given selections and returns the deleted selections as []identity.NumericIdentity
func (cs *testCachedSelector) deleteSelections(selections ...int) (deletes []identity.NumericIdentity) {
for _, id := range selections {
nid := identity.NumericIdentity(id)
deletes = append(deletes, nid)
if cs == nil {
continue
}
for i := 0; i < len(cs.selections); i++ {
if nid == cs.selections[i] {
cs.selections = append(cs.selections[:i], cs.selections[i+1:]...)
i--
}
}
}
return deletes
}
// CachedSelector interface
func (cs *testCachedSelector) GetSelections(*versioned.VersionHandle) identity.NumericIdentitySlice {
return cs.selections
}
func (cs *testCachedSelector) GetMetadataLabels() labels.LabelArray {
return nil
}
func (cs *testCachedSelector) Selects(_ *versioned.VersionHandle, nid identity.NumericIdentity) bool {
for _, id := range cs.selections {
if id == nid {
return true
}
}
return false
}
func (cs *testCachedSelector) IsWildcard() bool {
return cs.wildcard
}
func (cs *testCachedSelector) IsNone() bool {
return false
}
func (cs *testCachedSelector) String() string {
return cs.name
}
func TestAddRemoveSelector(t *testing.T) {
sc := testNewSelectorCache(identity.IdentityMap{})
// Add some identities to the identity cache
wg := &sync.WaitGroup{}
sc.UpdateIdentities(identity.IdentityMap{
1234: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s),
k8sConst.PodNamespaceLabel: labels.NewLabel(k8sConst.PodNamespaceLabel, "default", labels.LabelSourceK8s)}.LabelArray(),
2345: labels.Labels{"app": labels.NewLabel("app", "test2", labels.LabelSourceK8s)}.LabelArray(),
}, nil, wg)
wg.Wait()
testSelector := api.NewESFromLabels(labels.NewLabel("app", "test", labels.LabelSourceK8s),
labels.NewLabel(k8sConst.PodNamespaceLabel, "default", labels.LabelSourceK8s))
user1 := newUser(t, "user1", sc)
cached := user1.AddIdentitySelector(testSelector)
// Current selections contain the numeric identities of existing identities that match
selections := cached.GetSelections(versioned.Latest())
require.Len(t, selections, 1)
require.Equal(t, identity.NumericIdentity(1234), selections[0])
// Try adding the same selector from the same user a second time
testSelector = api.NewESFromLabels(labels.NewLabel("app", "test", labels.LabelSourceK8s),
labels.NewLabel(k8sConst.PodNamespaceLabel, "default", labels.LabelSourceK8s))
cached2 := user1.AddIdentitySelector(testSelector)
require.Equal(t, cached, cached2)
// Add the same selector from a different user
testSelector = api.NewESFromLabels(labels.NewLabel("app", "test", labels.LabelSourceK8s),
labels.NewLabel(k8sConst.PodNamespaceLabel, "default", labels.LabelSourceK8s))
user2 := newUser(t, "user2", sc)
cached3 := user2.AddIdentitySelector(testSelector)
// Same old CachedSelector is returned, nothing new is cached
require.Equal(t, cached, cached3)
// Removing the first user does not remove the cached selector
user1.RemoveSelector(cached)
// Remove is idempotent
user1.RemoveSelector(cached)
// Removing the last user removes the cached selector
user2.RemoveSelector(cached3)
// Remove is idempotent
user2.RemoveSelector(cached3)
// All identities removed
require.Empty(t, sc.selectors)
}
func TestMultipleIdentitySelectors(t *testing.T) {
sc := testNewSelectorCache(identity.IdentityMap{})
// Add some identities to the identity cache
wg := &sync.WaitGroup{}
li1 := identity.IdentityScopeLocal
li2 := li1 + 1
sc.UpdateIdentities(identity.IdentityMap{
1234: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
2345: labels.Labels{"app": labels.NewLabel("app", "test2", labels.LabelSourceK8s)}.LabelArray(),
li1: labels.GetCIDRLabels(netip.MustParsePrefix("10.0.0.1/32")).LabelArray(),
li2: labels.GetCIDRLabels(netip.MustParsePrefix("10.0.0.0/8")).LabelArray(),
}, nil, wg)
wg.Wait()
testSelector := api.NewESFromLabels(labels.NewLabel("app", "test", labels.LabelSourceAny))
test2Selector := api.NewESFromLabels(labels.NewLabel("app", "test2", labels.LabelSourceAny))
// Test both exact and broader CIDR selectors
cidr32Selector := api.NewESFromLabels(labels.NewLabel("cidr:10.0.0.1/32", "", labels.LabelSourceCIDR))
cidr24Selector := api.NewESFromLabels(labels.NewLabel("cidr:10.0.0.0/24", "", labels.LabelSourceCIDR))
cidr8Selector := api.NewESFromLabels(labels.NewLabel("cidr:10.0.0.0/8", "", labels.LabelSourceCIDR))
cidr7Selector := api.NewESFromLabels(labels.NewLabel("cidr:10.0.0.0/7", "", labels.LabelSourceCIDR))
user1 := newUser(t, "user1", sc)
cached := user1.AddIdentitySelector(testSelector)
// Current selections contain the numeric identities of existing identities that match
selections := cached.GetSelections(versioned.Latest())
require.Len(t, selections, 1)
require.Equal(t, identity.NumericIdentity(1234), selections[0])
// Add another selector from the same user
cached2 := user1.AddIdentitySelector(test2Selector)
require.NotEqual(t, cached, cached2)
// Current selections contain the numeric identities of existing identities that match
selections2 := cached2.GetSelections(versioned.Latest())
require.Len(t, selections2, 1)
require.Equal(t, identity.NumericIdentity(2345), selections2[0])
shouldSelect := func(sel api.EndpointSelector, wantIDs ...identity.NumericIdentity) {
csel := user1.AddIdentitySelector(sel)
selections := csel.GetSelections(versioned.Latest())
require.EqualValues(t, identity.NumericIdentitySlice(wantIDs), selections)
user1.RemoveSelector(csel)
}
shouldSelect(cidr32Selector, li1)
shouldSelect(cidr24Selector, li1)
shouldSelect(cidr8Selector, li1, li2)
shouldSelect(cidr7Selector, li1, li2)
user1.RemoveSelector(cached)
user1.RemoveSelector(cached2)
// All identities removed
require.Empty(t, sc.selectors)
}
func TestIdentityUpdates(t *testing.T) {
sc := testNewSelectorCache(identity.IdentityMap{})
// Add some identities to the identity cache
wg := &sync.WaitGroup{}
sc.UpdateIdentities(identity.IdentityMap{
1234: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
2345: labels.Labels{"app": labels.NewLabel("app", "test2", labels.LabelSourceK8s)}.LabelArray(),
}, nil, wg)
wg.Wait()
testSelector := api.NewESFromLabels(labels.NewLabel("app", "test", labels.LabelSourceAny))
test2Selector := api.NewESFromLabels(labels.NewLabel("app", "test2", labels.LabelSourceAny))
user1 := newUser(t, "user1", sc)
cached := user1.AddIdentitySelector(testSelector)
// Current selections contain the numeric identities of existing identities that match
selections := cached.GetSelections(versioned.Latest())
require.Len(t, selections, 1)
require.Equal(t, identity.NumericIdentity(1234), selections[0])
// Add another selector from the same user
cached2 := user1.AddIdentitySelector(test2Selector)
require.NotEqual(t, cached, cached2)
// Current selections contain the numeric identities of existing identities that match
selections2 := cached2.GetSelections(versioned.Latest())
require.Len(t, selections2, 1)
require.Equal(t, identity.NumericIdentity(2345), selections2[0])
user1.Reset()
// Add some identities to the identity cache
wg = &sync.WaitGroup{}
sc.UpdateIdentities(identity.IdentityMap{
12345: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
}, nil, wg)
wg.Wait()
adds, deletes := user1.WaitForUpdate()
require.Equal(t, 1, adds)
require.Equal(t, 0, deletes)
// Current selections contain the numeric identities of existing identities that match
selections = cached.GetSelections(versioned.Latest())
require.Len(t, selections, 2)
require.Equal(t, identity.NumericIdentity(1234), selections[0])
require.Equal(t, identity.NumericIdentity(12345), selections[1])
user1.Reset()
// Remove some identities from the identity cache
wg = &sync.WaitGroup{}
sc.UpdateIdentities(nil, identity.IdentityMap{
12345: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
}, wg)
wg.Wait()
adds, deletes = user1.WaitForUpdate()
require.Equal(t, 1, adds)
require.Equal(t, 1, deletes)
// Current selections contain the numeric identities of existing identities that match
selections = cached.GetSelections(versioned.Latest())
require.Len(t, selections, 1)
require.Equal(t, identity.NumericIdentity(1234), selections[0])
user1.RemoveSelector(cached)
user1.RemoveSelector(cached2)
// All identities removed
require.Empty(t, sc.selectors)
}
func TestIdentityUpdatesMultipleUsers(t *testing.T) {
sc := testNewSelectorCache(identity.IdentityMap{})
// Add some identities to the identity cache
wg := &sync.WaitGroup{}
sc.UpdateIdentities(identity.IdentityMap{
1234: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
2345: labels.Labels{"app": labels.NewLabel("app", "test2", labels.LabelSourceK8s)}.LabelArray(),
}, nil, wg)
wg.Wait()
testSelector := api.NewESFromLabels(labels.NewLabel("app", "test", labels.LabelSourceK8s))
user1 := newUser(t, "user1", sc)
cached := user1.AddIdentitySelector(testSelector)
// Add same selector from a different user
user2 := newUser(t, "user2", sc)
cached2 := user2.AddIdentitySelector(testSelector)
require.Equal(t, cached, cached2)
user1.Reset()
user2.Reset()
// Add some identities to the identity cache
wg = &sync.WaitGroup{}
sc.UpdateIdentities(identity.IdentityMap{
123: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
234: labels.Labels{"app": labels.NewLabel("app", "test2", labels.LabelSourceK8s)}.LabelArray(),
345: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
}, nil, wg)
wg.Wait()
adds, deletes := user1.WaitForUpdate()
require.Equal(t, 2, adds)
require.Equal(t, 0, deletes)
adds, deletes = user2.WaitForUpdate()
require.Equal(t, 2, adds)
require.Equal(t, 0, deletes)
// Current selections contain the numeric identities of existing identities that match
selections := cached.GetSelections(versioned.Latest())
require.Len(t, selections, 3)
require.Equal(t, identity.NumericIdentity(123), selections[0])
require.Equal(t, identity.NumericIdentity(345), selections[1])
require.Equal(t, identity.NumericIdentity(1234), selections[2])
require.EqualValues(t, cached2.GetSelections(versioned.Latest()), cached.GetSelections(versioned.Latest()))
user1.Reset()
user2.Reset()
// Remove some identities from the identity cache
wg = &sync.WaitGroup{}
sc.UpdateIdentities(nil, identity.IdentityMap{
123: labels.Labels{"app": labels.NewLabel("app", "test", labels.LabelSourceK8s)}.LabelArray(),
234: labels.Labels{"app": labels.NewLabel("app", "test2", labels.LabelSourceK8s)}.LabelArray(),
}, wg)
wg.Wait()
adds, deletes = user1.WaitForUpdate()
require.Equal(t, 2, adds)
require.Equal(t, 1, deletes)
adds, deletes = user2.WaitForUpdate()
require.Equal(t, 2, adds)
require.Equal(t, 1, deletes)
// Current selections contain the numeric identities of existing identities that match
selections = cached.GetSelections(versioned.Latest())
require.Len(t, selections, 2)
require.Equal(t, identity.NumericIdentity(345), selections[0])
require.Equal(t, identity.NumericIdentity(1234), selections[1])
require.EqualValues(t, cached2.GetSelections(versioned.Latest()), cached.GetSelections(versioned.Latest()))
user1.RemoveSelector(cached)
user2.RemoveSelector(cached2)
// All identities removed
require.Empty(t, sc.selectors)
}
func TestTransactionalUpdate(t *testing.T) {
sc := testNewSelectorCache(identity.IdentityMap{})
// Add some identities to the identity cache
wg := &sync.WaitGroup{}
li1 := identity.IdentityScopeLocal
li2 := li1 + 1
sc.UpdateIdentities(identity.IdentityMap{
li1: labels.GetCIDRLabels(netip.MustParsePrefix("10.0.0.1/32")).LabelArray(),
li2: labels.GetCIDRLabels(netip.MustParsePrefix("10.0.0.0/8")).LabelArray(),
}, nil, wg)
wg.Wait()
// Test both exact and broader CIDR selectors
cidr32Selector := api.NewESFromLabels(labels.NewLabel("cidr:10.0.0.1/32", "", labels.LabelSourceCIDR))
cidr24Selector := api.NewESFromLabels(labels.NewLabel("cidr:10.0.0.0/24", "", labels.LabelSourceCIDR))
cidr8Selector := api.NewESFromLabels(labels.NewLabel("cidr:10.0.0.0/8", "", labels.LabelSourceCIDR))
cidr7Selector := api.NewESFromLabels(labels.NewLabel("cidr:10.0.0.0/7", "", labels.LabelSourceCIDR))
user1 := newUser(t, "user1", sc)
cs32 := user1.AddIdentitySelector(cidr32Selector)
cs24 := user1.AddIdentitySelector(cidr24Selector)
cs8 := user1.AddIdentitySelector(cidr8Selector)
cs7 := user1.AddIdentitySelector(cidr7Selector)
version := sc.versioned.GetVersionHandle()
defer version.Close()
require.Equal(t, identity.NumericIdentitySlice{li1}, cs32.GetSelections(version))
require.Equal(t, identity.NumericIdentitySlice{li1}, cs24.GetSelections(version))
require.Equal(t, identity.NumericIdentitySlice{li1, li2}, cs8.GetSelections(version))
require.Equal(t, identity.NumericIdentitySlice{li1, li2}, cs7.GetSelections(version))
// Add some identities to the identity cache
li3 := li2 + 1
li4 := li3 + 1
wg = &sync.WaitGroup{}
sc.UpdateIdentities(identity.IdentityMap{
li3: labels.GetCIDRLabels(netip.MustParsePrefix("10.0.0.0/31")).LabelArray(),
li4: labels.GetCIDRLabels(netip.MustParsePrefix("10.0.0.0/7")).LabelArray(),
}, nil, wg)
wg.Wait()
// Old version handle still gets the same selections as before
require.Equal(t, identity.NumericIdentitySlice{li1}, cs32.GetSelections(version))
require.Equal(t, identity.NumericIdentitySlice{li1}, cs24.GetSelections(version))
require.Equal(t, identity.NumericIdentitySlice{li1, li2}, cs8.GetSelections(version))
require.Equal(t, identity.NumericIdentitySlice{li1, li2}, cs7.GetSelections(version))
// New version handle sees the new updates on all selectors
version2 := sc.versioned.GetVersionHandle()
defer version2.Close()
require.Equal(t, identity.NumericIdentitySlice{li1}, cs32.GetSelections(version2))
require.Equal(t, identity.NumericIdentitySlice{li1, li3}, cs24.GetSelections(version2))
require.Equal(t, identity.NumericIdentitySlice{li1, li2, li3}, cs8.GetSelections(version2))
require.Equal(t, identity.NumericIdentitySlice{li1, li2, li3, li4}, cs7.GetSelections(version2))
// Remove some identities from the identity cache
wg = &sync.WaitGroup{}
sc.UpdateIdentities(nil, identity.IdentityMap{
li1: labels.GetCIDRLabels(netip.MustParsePrefix("10.0.0.1/32")).LabelArray(),
}, wg)
wg.Wait()
// Oldest version handle still gets the same selections as before
require.Equal(t, identity.NumericIdentitySlice{li1}, cs32.GetSelections(version))
require.Equal(t, identity.NumericIdentitySlice{li1}, cs24.GetSelections(version))
require.Equal(t, identity.NumericIdentitySlice{li1, li2}, cs8.GetSelections(version))
require.Equal(t, identity.NumericIdentitySlice{li1, li2}, cs7.GetSelections(version))
require.Equal(t, identity.NumericIdentitySlice{li1}, cs32.GetSelections(version2))
require.Equal(t, identity.NumericIdentitySlice{li1, li3}, cs24.GetSelections(version2))
require.Equal(t, identity.NumericIdentitySlice{li1, li2, li3}, cs8.GetSelections(version2))
require.Equal(t, identity.NumericIdentitySlice{li1, li2, li3, li4}, cs7.GetSelections(version2))
// New version handle sees the removal
version3 := sc.versioned.GetVersionHandle()
defer version3.Close()
require.Equal(t, identity.NumericIdentitySlice(nil), cs32.GetSelections(version3))
require.Equal(t, identity.NumericIdentitySlice{li3}, cs24.GetSelections(version3))
require.Equal(t, identity.NumericIdentitySlice{li2, li3}, cs8.GetSelections(version3))
require.Equal(t, identity.NumericIdentitySlice{li2, li3, li4}, cs7.GetSelections(version3))
user1.RemoveSelector(cs32)
user1.RemoveSelector(cs24)
user1.RemoveSelector(cs8)
user1.RemoveSelector(cs7)
// All identities removed
require.Empty(t, sc.selectors)
}
func TestSelectorManagerCanGetBeforeSet(t *testing.T) {
defer func() {
r := recover()
require.Nil(t, r)
}()
idSel := identitySelector{
key: "test",
users: make(map[CachedSelectionUser]struct{}),
}
selections := idSel.GetSelections(versioned.Latest())
require.Empty(t, selections)
}
func testNewSelectorCache(ids identity.IdentityMap) *SelectorCache {
sc := NewSelectorCache(ids)
sc.SetLocalIdentityNotifier(testidentity.NewDummyIdentityNotifier())
return sc
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import (
"github.com/cilium/cilium/pkg/logging/logfields"
)
// TriggerPolicyUpdates forces a full policy recomputation before
// regenerating all endpoints.
// This artificially bumps the policy revision, invalidating
// all cached policies. This is done when an additional resource
// used in policy calculation has changed.
func (u *Updater) TriggerPolicyUpdates(reason string) {
u.repo.BumpRevision()
log.WithField(logfields.Reason, reason).Info("Triggering full policy recalculation and regeneration of all endpoints")
u.regen.TriggerRegenerateAllEndpoints()
}
// NewUpdater returns a new Updater instance, ready for use, to handle
// triggering policy updates.
func NewUpdater(r PolicyRepository, regen regenerator) *Updater {
return &Updater{
regen: regen,
repo: r,
}
}
// Updater is responsible for triggering policy updates, in order to perform
// policy recalculation.
type Updater struct {
repo PolicyRepository
regen regenerator
}
type regenerator interface {
// TriggerRegenerateAllEndpoints should trigger a regeneration of all endpoints.
TriggerRegenerateAllEndpoints()
}
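// A minimal usage sketch (not part of the upstream code): the Updater is
// wired with a policy repository and a regenerator, and is then used to bump
// the policy revision when an external policy input changes. The function
// name and reason string are hypothetical.
func exampleUpdaterUsage(repo PolicyRepository, regen regenerator) {
	u := NewUpdater(repo, regen)
	u.TriggerPolicyUpdates("external policy input changed")
}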
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package policy
import "github.com/cilium/cilium/pkg/labels"
// JoinPath returns a joined path from a and b.
func JoinPath(a, b string) string {
return a + labels.PathDelimiter + b
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package resiliency
// retryableErr tracks errors that could be retried.
type retryableErr struct {
error
}
// Retryable returns a new instance.
func Retryable(e error) retryableErr {
return retryableErr{error: e}
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package resiliency
import (
"errors"
"fmt"
)
type tuple struct {
index int
err error
}
// ErrorSet tracks a collection of unique errors.
type ErrorSet struct {
total, failed int
msg string
errs map[string]tuple
}
// NewErrorSet returns a new instance.
func NewErrorSet(msg string, c int) *ErrorSet {
return &ErrorSet{
msg: msg,
total: c,
errs: make(map[string]tuple),
}
}
// Add adds one or more errors to the set.
func (e *ErrorSet) Add(errs ...error) {
for _, err := range errs {
if err == nil {
continue
}
if _, ok := e.errs[err.Error()]; ok {
continue
}
e.errs[err.Error()] = tuple{index: e.failed, err: err}
e.failed++
}
}
// Errors returns a list of unique errors or nil.
func (e *ErrorSet) Errors() []error {
if len(e.errs) == 0 {
return nil
}
errs := make([]error, len(e.errs)+1)
errs[0] = fmt.Errorf("%s (%d/%d) failed", e.msg, e.failed, e.total)
for _, t := range e.errs {
errs[t.index+1] = t.err
}
return errs
}
// Error returns a new composite error or nil.
func (e *ErrorSet) Error() error {
return errors.Join(e.Errors()...)
}
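// A minimal usage sketch (not part of the upstream code): collect per-item
// errors while processing a batch and return a single composite error. The
// function name and process callback are hypothetical.
func exampleErrorSetUsage(items []string, process func(string) error) error {
	es := NewErrorSet("processing items", len(items))
	for _, item := range items {
		es.Add(process(item))
	}
	// Error() joins the summary ("... (n/m) failed") with each unique error.
	return es.Error()
}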
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package resiliency
import (
"context"
"time"
"k8s.io/apimachinery/pkg/util/wait"
)
// RetryFunc is the callback invoked on each retry attempt. It returns true to
// stop retrying successfully, or a non-nil error to abort.
type RetryFunc func(ctx context.Context, retries int) (bool, error)
// Retry retries the provided call using exponential backoff, starting from the given initial duration, for at most maxRetries attempts.
func Retry(ctx context.Context, duration time.Duration, maxRetries int, fn RetryFunc) error {
bo := wait.Backoff{
Duration: duration,
Factor: 1,
Jitter: 0.1,
Steps: maxRetries,
}
var retries int
f := func(ctx context.Context) (bool, error) {
retries++
return fn(ctx, retries)
}
return wait.ExponentialBackoffWithContext(ctx, bo, f)
}
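// A minimal usage sketch (not part of the upstream code): the callback
// reports success by returning true; Retry gives up after maxRetries
// attempts or when the context is cancelled. The function name is
// hypothetical.
func exampleRetryUsage(ctx context.Context) error {
	attempts := 0
	return Retry(ctx, 100*time.Millisecond, 3, func(ctx context.Context, retries int) (bool, error) {
		attempts++
		// Pretend the operation succeeds on the second attempt.
		return attempts >= 2, nil
	})
}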
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package resiliency
import (
"errors"
)
// IsRetryable checks if an error can be retried.
func IsRetryable(e error) bool {
return errors.As(e, new(retryableErr))
}
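// A minimal usage sketch (not part of the upstream code): wrap an error with
// Retryable at the point where the failure is known to be transient, and let
// callers use IsRetryable to decide whether to retry. The function name is
// hypothetical.
func exampleRetryableUsage(transient error) bool {
	err := Retryable(transient)
	return IsRetryable(err) // true for any error wrapped by Retryable
}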
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Package time is a wrapper for the stdlib time library that aliases most
// underlying types, but allows overrides for testing purposes.
//
// Synced to go-1.20.7.
package time
import (
"time"
)
const (
Layout = time.Layout
ANSIC = time.ANSIC
UnixDate = time.UnixDate
RubyDate = time.RubyDate
RFC822 = time.RFC822
RFC822Z = time.RFC822Z
RFC850 = time.RFC850
RFC1123 = time.RFC1123
RFC1123Z = time.RFC1123Z
RFC3339 = time.RFC3339
RFC3339Nano = time.RFC3339Nano
Kitchen = time.Kitchen
Stamp = time.Stamp
StampMilli = time.StampMilli
StampMicro = time.StampMicro
StampNano = time.StampNano
DateTime = time.DateTime
DateOnly = time.DateOnly
TimeOnly = time.TimeOnly
Nanosecond = time.Nanosecond
Microsecond = time.Microsecond
Millisecond = time.Millisecond
Second = time.Second
Minute = time.Minute
Hour = time.Hour
)
var (
ParseDuration = time.ParseDuration
Since = time.Since
Until = time.Until
FixedZone = time.FixedZone
LoadLocation = time.LoadLocation
LoadLocationFromTZData = time.LoadLocationFromTZData
Date = time.Date
Now = time.Now
Parse = time.Parse
ParseInLocation = time.ParseInLocation
UTC = time.UTC
Unix = time.Unix
UnixMicro = time.UnixMicro
UnixMilli = time.UnixMilli
)
type (
Duration = time.Duration
Location = time.Location
Month = time.Month
ParseError = time.ParseError
Ticker = time.Ticker
Time = time.Time
Timer = time.Timer
Weekday = time.Weekday
)
var (
MaxInternalTimerDelay time.Duration
)
// After overrides the stdlib time.After to enforce maximum sleepiness via
// option.MaxInternalTimerDelay.
func After(d Duration) <-chan Time {
if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay {
d = MaxInternalTimerDelay
}
return time.After(d)
}
// Sleep overrides the stdlib time.Sleep to enforce maximum sleepiness via
// option.MaxInternalTimerDelay.
func Sleep(d time.Duration) {
if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay {
d = MaxInternalTimerDelay
}
time.Sleep(d)
}
// Tick overrides the stdlib time.Tick to enforce maximum sleepiness via
// option.MaxInternalTimerDelay.
func Tick(d Duration) <-chan time.Time {
return NewTicker(d).C
}
// NewTicker overrides the stdlib time.NewTicker to enforce maximum sleepiness
// via option.MaxInternalTimerDelay.
func NewTicker(d Duration) *time.Ticker {
if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay {
d = MaxInternalTimerDelay
}
return time.NewTicker(d)
}
// NewTimer overrides the stdlib time.NewTimer to enforce maximum sleepiness
// via option.MaxInternalTimerDelay.
func NewTimer(d Duration) *time.Timer {
if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay {
d = MaxInternalTimerDelay
}
return time.NewTimer(d)
}
// NewTimerWithoutMaxDelay returns a time.NewTimer without enforcing maximum
// sleepiness. This function should only be used in cases where the timer firing
// early impacts correctness. If in doubt, you probably should use NewTimer.
func NewTimerWithoutMaxDelay(d Duration) *time.Timer {
return time.NewTimer(d)
}
// AfterFunc overrides the stdlib time.AfterFunc to enforce maximum sleepiness
// via option.MaxInternalTimerDelay.
func AfterFunc(d Duration, f func()) *time.Timer {
if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay {
d = MaxInternalTimerDelay
}
return time.AfterFunc(d, f)
}
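// A minimal usage sketch (not part of the upstream code): callers use this
// package exactly like the stdlib; when MaxInternalTimerDelay is set (for
// example by test tooling), long waits are silently capped. The function
// name and the 10ms cap are illustrative.
func exampleCappedTimerUsage() {
	MaxInternalTimerDelay = 10 * Millisecond // assumed test-only override
	defer func() { MaxInternalTimerDelay = 0 }()
	// Despite asking for a minute, this returns after at most ~10ms.
	<-After(Minute)
}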
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
package version
import (
"encoding/base64"
"encoding/json"
"fmt"
"runtime"
"strings"
)
// CiliumVersion provides a minimal structure for the version string
type CiliumVersion struct {
// Version is the semantic version of Cilium
Version string
// Revision is the short SHA from the last commit
Revision string
// GoRuntimeVersion is the Go version used to run Cilium
GoRuntimeVersion string
// Arch is the architecture where Cilium was compiled
Arch string
// AuthorDate is the git author time reference stored as string ISO 8601 formatted
AuthorDate string
}
// ciliumVersion is set to Cilium's version, revision and git author time reference during build.
var ciliumVersion string
// Version is the complete Cilium version string including Go version.
var Version string
func init() {
// Mimic the output of `go version` and append it to ciliumVersion.
// Report GOOS/GOARCH of the actual binary, not the system it was built on, in case it was
// cross-compiled. See #13122
Version = fmt.Sprintf("%s go version %s %s/%s", ciliumVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH)
}
// FromString converts a version string into a CiliumVersion struct
func FromString(versionString string) CiliumVersion {
// string to parse: "0.13.90 a722bdb 2018-01-09T22:32:37+01:00 go version go1.9 linux/amd64"
fields := strings.Split(versionString, " ")
if len(fields) != 7 {
return CiliumVersion{}
}
cver := CiliumVersion{
Version: fields[0],
Revision: fields[1],
AuthorDate: fields[2],
GoRuntimeVersion: fields[5],
Arch: fields[6],
}
return cver
}
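// A minimal sketch (not part of the upstream code) of the layout FromString
// expects: seven space-separated fields, of which the version, revision,
// author date, Go runtime version and architecture are kept. The function
// name and the field values below are illustrative.
func exampleFromString() CiliumVersion {
	return FromString("1.14.0 a722bdb 2023-01-09T22:32:37+01:00 go version go1.20.7 linux/amd64")
}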
// GetCiliumVersion returns an initialized CiliumVersion structure
func GetCiliumVersion() CiliumVersion {
return FromString(Version)
}
// Base64 returns the version in a base64 format.
func Base64() (string, error) {
jsonBytes, err := json.Marshal(Version)
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(jsonBytes), nil
}
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build !windows
package version
import (
"fmt"
"regexp"
"strings"
"github.com/blang/semver/v4"
"golang.org/x/sys/unix"
"github.com/cilium/cilium/pkg/versioncheck"
)
func parseKernelVersion(ver string) (semver.Version, error) {
verStrs := strings.Split(ver, ".")
// We are assuming the kernel version will be one of the following:
// 4.9.17-040917-generic or 4.9-040917-generic or 4-generic
// So as observed, the kernel value is N.N.N-m or N.N-m or N-m
// This implies the len(verStrs) should be between 1 and 3
if len(verStrs) < 1 || len(verStrs) > 3 {
return semver.Version{}, fmt.Errorf("unable to get kernel version from %q", ver)
}
// Given the observations, we use a regular expression to extract
// the patch number from the last element of the verStrs array and
// pad the verStrs array with "0" until its length is 3, as in all
// cases we want to return from this function:
// Major.Minor.PatchNumber
patch := regexp.MustCompilePOSIX(`^[0-9]+`).FindString(verStrs[len(verStrs)-1])
if patch == "" {
verStrs[len(verStrs)-1] = "0"
} else {
verStrs[len(verStrs)-1] = patch
}
for len(verStrs) < 3 {
verStrs = append(verStrs, "0")
}
return versioncheck.Version(strings.Join(verStrs[:3], "."))
}
// GetKernelVersion returns the version of the Linux kernel running on this host.
func GetKernelVersion() (semver.Version, error) {
var unameBuf unix.Utsname
if err := unix.Uname(&unameBuf); err != nil {
return semver.Version{}, err
}
return parseKernelVersion(string(unameBuf.Release[:]))
}
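// A minimal sketch (not part of the upstream code): a release string such as
// "5.15.0-91-generic" is reduced to the semver "5.15.0" before being compared
// against kernel feature constraints elsewhere. The function name is
// hypothetical.
func exampleParseKernelVersion() (semver.Version, error) {
	return parseKernelVersion("5.15.0-91-generic")
}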
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
// Package versioncheck provides utility wrappers for go-version, allowing the
// constraints to be used as global variables.
package versioncheck
import (
"fmt"
"strconv"
"strings"
"github.com/blang/semver/v4"
)
// MustCompile wraps go-version.NewConstraint, panicking when an error is
// returned (this occurs when the constraint cannot be parsed).
// It is intended to be used similarly to regexp.MustCompile, to ensure unparseable
// constraints are caught in testing.
func MustCompile(constraint string) semver.Range {
verCheck, err := Compile(constraint)
if err != nil {
panic(fmt.Errorf("cannot compile go-version constraint '%s': %w", constraint, err))
}
return verCheck
}
// Compile trivially wraps go-version.NewConstraint, returning the constraint
// and error
func Compile(constraint string) (semver.Range, error) {
return semver.ParseRange(constraint)
}
// MustVersion wraps go-version.NewVersion, panicking when an error is
// returned (this occurs when the version cannot be parsed).
func MustVersion(version string) semver.Version {
ver, err := Version(version)
if err != nil {
panic(fmt.Errorf("cannot compile go-version version '%s': %w", version, err))
}
return ver
}
// Version wraps go-version.NewVersion, returning an error when the version
// cannot be parsed.
func Version(version string) (semver.Version, error) {
ver, err := semver.ParseTolerant(version)
if err != nil {
return ver, err
}
if len(ver.Pre) == 0 {
return ver, nil
}
for _, pre := range ver.Pre {
if strings.Contains(pre.VersionStr, "rc") ||
strings.Contains(pre.VersionStr, "beta") ||
strings.Contains(pre.VersionStr, "alpha") ||
strings.Contains(pre.VersionStr, "snapshot") {
return ver, nil
}
}
strSegments := make([]string, 3)
strSegments[0] = strconv.Itoa(int(ver.Major))
strSegments[1] = strconv.Itoa(int(ver.Minor))
strSegments[2] = strconv.Itoa(int(ver.Patch))
verStr := strings.Join(strSegments, ".")
return semver.ParseTolerant(verStr)
}
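// A minimal usage sketch (not part of the upstream code): a compiled range is
// kept as a package-level variable and evaluated against parsed versions. The
// names and the ">=1.26.0" constraint are illustrative.
var exampleMinimumVersion = MustCompile(">=1.26.0")

func exampleVersionSatisfies(version string) bool {
	v, err := Version(version)
	if err != nil {
		return false
	}
	// semver.Range is a func(semver.Version) bool, so it is called directly.
	return exampleMinimumVersion(v)
}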
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build gofuzz
package fuzz
import (
"github.com/cilium/cilium/pkg/labels"
)
func Fuzz(data []byte) int {
label := labels.NewLabel("test", "label", "1")
err := label.UnmarshalJSON(data)
if err != nil {
return 0
}
return 1
}